/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		118
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"April 22, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
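
/* Because TG3_TX_RING_SIZE is a compile-time power of two (512), the wrap
 * above is a single AND: e.g. NEXT_TX(511) == (512 & 511) == 0, with no
 * divide or modulo instruction emitted.
 */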

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "mbuf_lwm_thresh_hit" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
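
/* The tw32()/tr32() family dispatches through per-device function pointers
 * (tp->write32, tp->read32, and the mailbox variants) so that chips needing
 * indirect config-space access or read-back flushes get them transparently;
 * the _f and _wait_f forms additionally flush the posted write by reading
 * the register back, with an optional delay around the read.
 */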

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
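
/* The APE lock is a simple request/grant handshake with the management
 * firmware: the driver writes APE_LOCK_REQ_DRIVER to its per-lock request
 * register and polls the matching grant register.  On timeout, writing the
 * driver's bit to the *grant* register withdraws the pending request so a
 * stale claim is not left behind for the firmware to honor later.
 */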

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
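
/* The two helpers below drive the MAC's MI (MDIO) interface directly: a
 * frame is assembled in MAC_MI_COM from the PHY address, register address,
 * a read or write command, and MI_COM_START, then MI_COM_BUSY is polled
 * (up to PHY_BUSY_LOOPS iterations of 10 usec) until the cycle completes.
 * Autopolling is paused around the access so the MAC does not issue
 * competing MI transactions.
 */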
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
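
/* The cl45 helpers above implement the standard Clause 22 -> Clause 45
 * indirection (IEEE 802.3 MMD access): write the device address to the
 * MMD control register, the target register to the MMD address/data
 * register, switch the control register to no-post-increment data mode,
 * then move the actual value through the address/data register.
 */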

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

/* Note: no trailing semicolon here; callers supply their own, and a stray
 * one would break use of this macro in an if/else body.
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
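
/* The resolution above follows the usual IEEE 802.3x pause negotiation
 * table, with PAUSE/ASM_DIR as the (local, remote) advertisement bits:
 *
 *	local PAUSE      + remote PAUSE		-> TX and RX pause
 *	local PAUSE|ASM  + remote ASM only	-> RX pause only
 *	local ASM only   + remote PAUSE|ASM	-> TX pause only
 *	anything else				-> no pause
 */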

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		switch (val) {
		case TG3_CL45_D7_EEERES_STAT_LP_1000T:
			switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
			case ASIC_REV_5717:
			case ASIC_REV_5719:
			case ASIC_REV_57765:
				if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
					tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
							 0x0000);
					TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
				}
			}
			/* Fallthrough */
		case TG3_CL45_D7_EEERES_STAT_LP_100TX:
			tp->setlpicnt = 2;
		}
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
1953 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1955 u32 reg32, phy9_orig;
1956 int retries, do_phy_reset, err;
1958 retries = 10;
1959 do_phy_reset = 1;
1960 do {
1961 if (do_phy_reset) {
1962 err = tg3_bmcr_reset(tp);
1963 if (err)
1964 return err;
1965 do_phy_reset = 0;
1968 /* Disable transmitter and interrupt. */
1969 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1970 continue;
1972 reg32 |= 0x3000;
1973 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1975 /* Set full-duplex, 1000 mbps. */
1976 tg3_writephy(tp, MII_BMCR,
1977 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1979 /* Set to master mode. */
1980 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1981 continue;
1983 tg3_writephy(tp, MII_TG3_CTRL,
1984 (MII_TG3_CTRL_AS_MASTER |
1985 MII_TG3_CTRL_ENABLE_AS_MASTER));
1987 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1988 if (err)
1989 return err;
1991 /* Block the PHY control access. */
1992 tg3_phydsp_write(tp, 0x8005, 0x0800);
1994 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1995 if (!err)
1996 break;
1997 } while (--retries);
1999 err = tg3_phy_reset_chanpat(tp);
2000 if (err)
2001 return err;
2003 tg3_phydsp_write(tp, 0x8005, 0x0000);
2005 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2006 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2008 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2010 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2012 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2013 reg32 &= ~0x3000;
2014 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2015 } else if (!err)
2016 err = -EBUSY;
2018 return err;
2021 /* Fully reset the tigon3 PHY and reapply the chip- and
2022 * PHY-specific workarounds that the reset clears.
2023 */
2024 static int tg3_phy_reset(struct tg3 *tp)
2026 u32 val, cpmuctrl;
2027 int err;
2029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2030 val = tr32(GRC_MISC_CFG);
2031 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2032 udelay(40);
2034 err = tg3_readphy(tp, MII_BMSR, &val);
2035 err |= tg3_readphy(tp, MII_BMSR, &val);
2036 if (err != 0)
2037 return -EBUSY;
2039 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2040 netif_carrier_off(tp->dev);
2041 tg3_link_report(tp);
2044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2047 err = tg3_phy_reset_5703_4_5(tp);
2048 if (err)
2049 return err;
2050 goto out;
2053 cpmuctrl = 0;
2054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2055 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2056 cpmuctrl = tr32(TG3_CPMU_CTRL);
2057 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2058 tw32(TG3_CPMU_CTRL,
2059 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2062 err = tg3_bmcr_reset(tp);
2063 if (err)
2064 return err;
2066 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2067 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2068 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2070 tw32(TG3_CPMU_CTRL, cpmuctrl);
2073 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2074 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2075 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2076 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2077 CPMU_LSPD_1000MB_MACCLK_12_5) {
2078 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2079 udelay(40);
2080 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2084 if (tg3_flag(tp, 5717_PLUS) &&
2085 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2086 return 0;
2088 tg3_phy_apply_otp(tp);
2090 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2091 tg3_phy_toggle_apd(tp, true);
2092 else
2093 tg3_phy_toggle_apd(tp, false);
2095 out:
2096 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2097 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2098 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2099 tg3_phydsp_write(tp, 0x000a, 0x0323);
2100 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2103 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2104 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2105 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2108 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2109 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2110 tg3_phydsp_write(tp, 0x000a, 0x310b);
2111 tg3_phydsp_write(tp, 0x201f, 0x9506);
2112 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2113 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2115 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2116 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2118 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2119 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2120 tg3_writephy(tp, MII_TG3_TEST1,
2121 MII_TG3_TEST1_TRIM_EN | 0x4);
2122 } else
2123 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2125 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2129 /* Set the extended packet length bit (bit 14) on all chips
2130 * that support jumbo frames. */
2131 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2132 /* Cannot do read-modify-write on 5401 */
2133 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2134 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2135 /* Set bit 14 with read-modify-write to preserve other bits */
2136 err = tg3_phy_auxctl_read(tp,
2137 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2138 if (!err)
2139 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2140 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2143 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2144 * jumbo frames transmission.
2145 */
2146 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2147 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2148 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2149 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2153 /* adjust output voltage */
2154 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2157 tg3_phy_toggle_automdix(tp, 1);
2158 tg3_phy_set_wirespeed(tp);
2159 return 0;
2162 static void tg3_frob_aux_power(struct tg3 *tp)
2164 bool need_vaux = false;
2166 /* The GPIOs do something completely different on 57765. */
2167 if (!tg3_flag(tp, IS_NIC) ||
2168 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2170 return;
2172 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2176 tp->pdev_peer != tp->pdev) {
2177 struct net_device *dev_peer;
2179 dev_peer = pci_get_drvdata(tp->pdev_peer);
2181 /* remove_one() may have been run on the peer. */
2182 if (dev_peer) {
2183 struct tg3 *tp_peer = netdev_priv(dev_peer);
2185 if (tg3_flag(tp_peer, INIT_COMPLETE))
2186 return;
2188 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2189 tg3_flag(tp_peer, ENABLE_ASF))
2190 need_vaux = true;
2194 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2195 need_vaux = true;
2197 if (need_vaux) {
2198 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2200 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2201 (GRC_LCLCTRL_GPIO_OE0 |
2202 GRC_LCLCTRL_GPIO_OE1 |
2203 GRC_LCLCTRL_GPIO_OE2 |
2204 GRC_LCLCTRL_GPIO_OUTPUT0 |
2205 GRC_LCLCTRL_GPIO_OUTPUT1),
2206 100);
2207 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2208 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2209 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2210 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2211 GRC_LCLCTRL_GPIO_OE1 |
2212 GRC_LCLCTRL_GPIO_OE2 |
2213 GRC_LCLCTRL_GPIO_OUTPUT0 |
2214 GRC_LCLCTRL_GPIO_OUTPUT1 |
2215 tp->grc_local_ctrl;
2216 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2218 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2219 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2221 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2222 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2223 } else {
2224 u32 no_gpio2;
2225 u32 grc_local_ctrl = 0;
2227 /* Workaround to avoid drawing too much current. */
2228 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2229 ASIC_REV_5714) {
2230 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2231 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2232 grc_local_ctrl, 100);
2235 /* On 5753 and variants, GPIO2 cannot be used. */
2236 no_gpio2 = tp->nic_sram_data_cfg &
2237 NIC_SRAM_DATA_CFG_NO_GPIO2;
2239 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2240 GRC_LCLCTRL_GPIO_OE1 |
2241 GRC_LCLCTRL_GPIO_OE2 |
2242 GRC_LCLCTRL_GPIO_OUTPUT1 |
2243 GRC_LCLCTRL_GPIO_OUTPUT2;
2244 if (no_gpio2) {
2245 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2246 GRC_LCLCTRL_GPIO_OUTPUT2);
2248 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2249 grc_local_ctrl, 100);
2251 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2253 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2254 grc_local_ctrl, 100);
2256 if (!no_gpio2) {
2257 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2258 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2259 grc_local_ctrl, 100);
2262 } else {
2263 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2264 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2265 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266 (GRC_LCLCTRL_GPIO_OE1 |
2267 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2269 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2270 GRC_LCLCTRL_GPIO_OE1, 100);
2272 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273 (GRC_LCLCTRL_GPIO_OE1 |
2274 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2279 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2280 {
2281 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2282 return 1;
2283 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2284 if (speed != SPEED_10)
2285 return 1;
2286 } else if (speed == SPEED_10)
2287 return 1;
2289 return 0;
2290 }
2292 static int tg3_setup_phy(struct tg3 *, int);
2294 #define RESET_KIND_SHUTDOWN 0
2295 #define RESET_KIND_INIT 1
2296 #define RESET_KIND_SUSPEND 2
2298 static void tg3_write_sig_post_reset(struct tg3 *, int);
2299 static int tg3_halt_cpu(struct tg3 *, u32);
2301 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2303 u32 val;
2305 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2307 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2308 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2310 sg_dig_ctrl |=
2311 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2312 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2313 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2315 return;
2318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2319 tg3_bmcr_reset(tp);
2320 val = tr32(GRC_MISC_CFG);
2321 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2322 udelay(40);
2323 return;
2324 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2325 u32 phytest;
2326 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2327 u32 phy;
2329 tg3_writephy(tp, MII_ADVERTISE, 0);
2330 tg3_writephy(tp, MII_BMCR,
2331 BMCR_ANENABLE | BMCR_ANRESTART);
2333 tg3_writephy(tp, MII_TG3_FET_TEST,
2334 phytest | MII_TG3_FET_SHADOW_EN);
2335 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2336 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2337 tg3_writephy(tp,
2338 MII_TG3_FET_SHDW_AUXMODE4,
2339 phy);
2341 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2343 return;
2344 } else if (do_low_power) {
2345 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2346 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2348 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2349 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2350 MII_TG3_AUXCTL_PCTL_VREG_11V;
2351 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2354 /* The PHY should not be powered down on some chips because
2355 * of bugs.
2356 */
2357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2360 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2361 return;
2363 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2364 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2365 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2366 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2367 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2368 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2371 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2374 /* tp->lock is held. */
2375 static int tg3_nvram_lock(struct tg3 *tp)
2376 {
2377 if (tg3_flag(tp, NVRAM)) {
2378 int i;
2380 if (tp->nvram_lock_cnt == 0) {
2381 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2382 for (i = 0; i < 8000; i++) {
2383 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2384 break;
2385 udelay(20);
2386 }
2387 if (i == 8000) {
2388 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2389 return -ENODEV;
2390 }
2391 }
2392 tp->nvram_lock_cnt++;
2393 }
2394 return 0;
2395 }
2397 /* tp->lock is held. */
2398 static void tg3_nvram_unlock(struct tg3 *tp)
2399 {
2400 if (tg3_flag(tp, NVRAM)) {
2401 if (tp->nvram_lock_cnt > 0)
2402 tp->nvram_lock_cnt--;
2403 if (tp->nvram_lock_cnt == 0)
2404 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2405 }
2406 }
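/* Usage pattern (hypothetical example, mirroring tg3_nvram_read()
 * below): every NVRAM access is bracketed by the arbitration lock:
 *
 *	if (tg3_nvram_lock(tp))
 *		return -ENODEV;
 *	... touch NVRAM registers ...
 *	tg3_nvram_unlock(tp);
 *
 * The lock counts recursively per port: only the 0 -> 1 transition
 * requests the hardware semaphore (SWARB_REQ_SET1), and only the
 * 1 -> 0 transition releases it (SWARB_REQ_CLR1).
 */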
2408 /* tp->lock is held. */
2409 static void tg3_enable_nvram_access(struct tg3 *tp)
2410 {
2411 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2412 u32 nvaccess = tr32(NVRAM_ACCESS);
2414 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2415 }
2416 }
2418 /* tp->lock is held. */
2419 static void tg3_disable_nvram_access(struct tg3 *tp)
2420 {
2421 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2422 u32 nvaccess = tr32(NVRAM_ACCESS);
2424 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2425 }
2426 }
2428 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2429 u32 offset, u32 *val)
2430 {
2431 u32 tmp;
2432 int i;
2434 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2435 return -EINVAL;
2437 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2438 EEPROM_ADDR_DEVID_MASK |
2439 EEPROM_ADDR_READ);
2440 tw32(GRC_EEPROM_ADDR,
2441 tmp |
2442 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2443 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2444 EEPROM_ADDR_ADDR_MASK) |
2445 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2447 for (i = 0; i < 1000; i++) {
2448 tmp = tr32(GRC_EEPROM_ADDR);
2450 if (tmp & EEPROM_ADDR_COMPLETE)
2451 break;
2452 msleep(1);
2453 }
2454 if (!(tmp & EEPROM_ADDR_COMPLETE))
2455 return -EBUSY;
2457 tmp = tr32(GRC_EEPROM_DATA);
2459 /*
2460 * The data will always be opposite the native endian
2461 * format. Perform a blind byteswap to compensate.
2462 */
2463 *val = swab32(tmp);
2465 return 0;
2466 }
2468 #define NVRAM_CMD_TIMEOUT 10000
2470 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2471 {
2472 int i;
2474 tw32(NVRAM_CMD, nvram_cmd);
2475 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2476 udelay(10);
2477 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2478 udelay(10);
2479 break;
2480 }
2481 }
2483 if (i == NVRAM_CMD_TIMEOUT)
2484 return -EBUSY;
2486 return 0;
2487 }
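/* Worst-case math for the loop above: NVRAM_CMD_TIMEOUT (10000)
 * iterations at udelay(10) each bounds a stuck command at roughly
 * 100 ms of busy-waiting before -EBUSY is returned.
 */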
2489 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2490 {
2491 if (tg3_flag(tp, NVRAM) &&
2492 tg3_flag(tp, NVRAM_BUFFERED) &&
2493 tg3_flag(tp, FLASH) &&
2494 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2495 (tp->nvram_jedecnum == JEDEC_ATMEL))
2497 addr = ((addr / tp->nvram_pagesize) <<
2498 ATMEL_AT45DB0X1B_PAGE_POS) +
2499 (addr % tp->nvram_pagesize);
2501 return addr;
2502 }
2504 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2505 {
2506 if (tg3_flag(tp, NVRAM) &&
2507 tg3_flag(tp, NVRAM_BUFFERED) &&
2508 tg3_flag(tp, FLASH) &&
2509 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2510 (tp->nvram_jedecnum == JEDEC_ATMEL))
2512 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2513 tp->nvram_pagesize) +
2514 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2516 return addr;
2517 }
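/* Worked example for the two translations above, assuming the Atmel
 * AT45DB0X1B parameters they target (tp->nvram_pagesize == 264,
 * ATMEL_AT45DB0X1B_PAGE_POS == 9):
 *
 *	linear 4096: page = 4096 / 264 = 15, offset = 4096 % 264 = 136
 *	             phys = (15 << 9) + 136 = 7816
 *
 * and tg3_nvram_logical_addr() inverts it:
 *
 *	(7816 >> 9) * 264 + (7816 & 0x1ff) = 15 * 264 + 136 = 4096
 */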
2519 /* NOTE: Data read in from NVRAM is byteswapped according to
2520 * the byteswapping settings for all other register accesses.
2521 * tg3 devices are BE devices, so on a BE machine, the data
2522 * returned will be exactly as it is seen in NVRAM. On a LE
2523 * machine, the 32-bit value will be byteswapped.
2524 */
2525 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2526 {
2527 int ret;
2529 if (!tg3_flag(tp, NVRAM))
2530 return tg3_nvram_read_using_eeprom(tp, offset, val);
2532 offset = tg3_nvram_phys_addr(tp, offset);
2534 if (offset > NVRAM_ADDR_MSK)
2535 return -EINVAL;
2537 ret = tg3_nvram_lock(tp);
2538 if (ret)
2539 return ret;
2541 tg3_enable_nvram_access(tp);
2543 tw32(NVRAM_ADDR, offset);
2544 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2545 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2547 if (ret == 0)
2548 *val = tr32(NVRAM_RDDATA);
2550 tg3_disable_nvram_access(tp);
2552 tg3_nvram_unlock(tp);
2554 return ret;
2555 }
2557 /* Ensures NVRAM data is in bytestream format. */
2558 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2559 {
2560 u32 v;
2561 int res = tg3_nvram_read(tp, offset, &v);
2562 if (!res)
2563 *val = cpu_to_be32(v);
2564 return res;
2565 }
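/* Illustrative sketch, not part of the original driver: pulling a
 * block of NVRAM into a byte buffer with the helper above. Because
 * tg3_nvram_read_be32() hands back bytestream (big-endian) words,
 * the bytes land in NVRAM order regardless of host endianness. The
 * helper name is hypothetical.
 */
static int tg3_nvram_read_block_sketch(struct tg3 *tp, u32 offset,
				       u8 *buf, unsigned int len)
{
	unsigned int i;
	int err;

	if ((offset & 3) || (len & 3))
		return -EINVAL;	/* 32-bit aligned accesses only */

	for (i = 0; i < len; i += 4) {
		__be32 v;

		err = tg3_nvram_read_be32(tp, offset + i, &v);
		if (err)
			return err;
		memcpy(buf + i, &v, sizeof(v));
	}
	return 0;
}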
2567 /* tp->lock is held. */
2568 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2569 {
2570 u32 addr_high, addr_low;
2571 int i;
2573 addr_high = ((tp->dev->dev_addr[0] << 8) |
2574 tp->dev->dev_addr[1]);
2575 addr_low = ((tp->dev->dev_addr[2] << 24) |
2576 (tp->dev->dev_addr[3] << 16) |
2577 (tp->dev->dev_addr[4] << 8) |
2578 (tp->dev->dev_addr[5] << 0));
2579 for (i = 0; i < 4; i++) {
2580 if (i == 1 && skip_mac_1)
2581 continue;
2582 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2583 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2584 }
2586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2588 for (i = 0; i < 12; i++) {
2589 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2590 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2591 }
2592 }
2594 addr_high = (tp->dev->dev_addr[0] +
2595 tp->dev->dev_addr[1] +
2596 tp->dev->dev_addr[2] +
2597 tp->dev->dev_addr[3] +
2598 tp->dev->dev_addr[4] +
2599 tp->dev->dev_addr[5]) &
2600 TX_BACKOFF_SEED_MASK;
2601 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2602 }
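/* Worked example for the packing above: for station address
 * 00:10:18:aa:bb:cc the code computes
 *
 *	addr_high = (0x00 << 8) | 0x10 = 0x00000010   (bytes 0-1)
 *	addr_low  = 0x18aabbcc                        (bytes 2-5)
 *
 * and writes the pair into all four MAC_ADDR_*_{HIGH,LOW} slots
 * (plus the twelve MAC_EXTADDR slots on 5703/5704) so perfect-match
 * filtering sees the same address everywhere. The transmit backoff
 * seed is simply the byte sum masked with TX_BACKOFF_SEED_MASK.
 */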
2604 static void tg3_enable_register_access(struct tg3 *tp)
2605 {
2606 /*
2607 * Make sure register accesses (indirect or otherwise) will function
2608 * correctly.
2609 */
2610 pci_write_config_dword(tp->pdev,
2611 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2612 }
2614 static int tg3_power_up(struct tg3 *tp)
2615 {
2616 tg3_enable_register_access(tp);
2618 pci_set_power_state(tp->pdev, PCI_D0);
2620 /* Switch out of Vaux if it is a NIC */
2621 if (tg3_flag(tp, IS_NIC))
2622 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2624 return 0;
2625 }
2627 static int tg3_power_down_prepare(struct tg3 *tp)
2629 u32 misc_host_ctrl;
2630 bool device_should_wake, do_low_power;
2632 tg3_enable_register_access(tp);
2634 /* Restore the CLKREQ setting. */
2635 if (tg3_flag(tp, CLKREQ_BUG)) {
2636 u16 lnkctl;
2638 pci_read_config_word(tp->pdev,
2639 tp->pcie_cap + PCI_EXP_LNKCTL,
2640 &lnkctl);
2641 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2642 pci_write_config_word(tp->pdev,
2643 tp->pcie_cap + PCI_EXP_LNKCTL,
2644 lnkctl);
2647 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2648 tw32(TG3PCI_MISC_HOST_CTRL,
2649 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2651 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2652 tg3_flag(tp, WOL_ENABLE);
2654 if (tg3_flag(tp, USE_PHYLIB)) {
2655 do_low_power = false;
2656 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2657 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2658 struct phy_device *phydev;
2659 u32 phyid, advertising;
2661 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2663 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2665 tp->link_config.orig_speed = phydev->speed;
2666 tp->link_config.orig_duplex = phydev->duplex;
2667 tp->link_config.orig_autoneg = phydev->autoneg;
2668 tp->link_config.orig_advertising = phydev->advertising;
2670 advertising = ADVERTISED_TP |
2671 ADVERTISED_Pause |
2672 ADVERTISED_Autoneg |
2673 ADVERTISED_10baseT_Half;
2675 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2676 if (tg3_flag(tp, WOL_SPEED_100MB))
2677 advertising |=
2678 ADVERTISED_100baseT_Half |
2679 ADVERTISED_100baseT_Full |
2680 ADVERTISED_10baseT_Full;
2681 else
2682 advertising |= ADVERTISED_10baseT_Full;
2685 phydev->advertising = advertising;
2687 phy_start_aneg(phydev);
2689 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2690 if (phyid != PHY_ID_BCMAC131) {
2691 phyid &= PHY_BCM_OUI_MASK;
2692 if (phyid == PHY_BCM_OUI_1 ||
2693 phyid == PHY_BCM_OUI_2 ||
2694 phyid == PHY_BCM_OUI_3)
2695 do_low_power = true;
2698 } else {
2699 do_low_power = true;
2701 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2702 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2703 tp->link_config.orig_speed = tp->link_config.speed;
2704 tp->link_config.orig_duplex = tp->link_config.duplex;
2705 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2708 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2709 tp->link_config.speed = SPEED_10;
2710 tp->link_config.duplex = DUPLEX_HALF;
2711 tp->link_config.autoneg = AUTONEG_ENABLE;
2712 tg3_setup_phy(tp, 0);
2716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2717 u32 val;
2719 val = tr32(GRC_VCPU_EXT_CTRL);
2720 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2721 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2722 int i;
2723 u32 val;
2725 for (i = 0; i < 200; i++) {
2726 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2727 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2728 break;
2729 msleep(1);
2732 if (tg3_flag(tp, WOL_CAP))
2733 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2734 WOL_DRV_STATE_SHUTDOWN |
2735 WOL_DRV_WOL |
2736 WOL_SET_MAGIC_PKT);
2738 if (device_should_wake) {
2739 u32 mac_mode;
2741 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2742 if (do_low_power &&
2743 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2744 tg3_phy_auxctl_write(tp,
2745 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2746 MII_TG3_AUXCTL_PCTL_WOL_EN |
2747 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2748 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2749 udelay(40);
2752 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2753 mac_mode = MAC_MODE_PORT_MODE_GMII;
2754 else
2755 mac_mode = MAC_MODE_PORT_MODE_MII;
2757 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2758 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2759 ASIC_REV_5700) {
2760 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2761 SPEED_100 : SPEED_10;
2762 if (tg3_5700_link_polarity(tp, speed))
2763 mac_mode |= MAC_MODE_LINK_POLARITY;
2764 else
2765 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2767 } else {
2768 mac_mode = MAC_MODE_PORT_MODE_TBI;
2771 if (!tg3_flag(tp, 5750_PLUS))
2772 tw32(MAC_LED_CTRL, tp->led_ctrl);
2774 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2775 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2776 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2777 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2779 if (tg3_flag(tp, ENABLE_APE))
2780 mac_mode |= MAC_MODE_APE_TX_EN |
2781 MAC_MODE_APE_RX_EN |
2782 MAC_MODE_TDE_ENABLE;
2784 tw32_f(MAC_MODE, mac_mode);
2785 udelay(100);
2787 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2788 udelay(10);
2791 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2792 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2794 u32 base_val;
2796 base_val = tp->pci_clock_ctrl;
2797 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2798 CLOCK_CTRL_TXCLK_DISABLE);
2800 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2801 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2802 } else if (tg3_flag(tp, 5780_CLASS) ||
2803 tg3_flag(tp, CPMU_PRESENT) ||
2804 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2805 /* do nothing */
2806 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2807 u32 newbits1, newbits2;
2809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2810 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2811 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2812 CLOCK_CTRL_TXCLK_DISABLE |
2813 CLOCK_CTRL_ALTCLK);
2814 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2815 } else if (tg3_flag(tp, 5705_PLUS)) {
2816 newbits1 = CLOCK_CTRL_625_CORE;
2817 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2818 } else {
2819 newbits1 = CLOCK_CTRL_ALTCLK;
2820 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2823 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2824 40);
2826 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2827 40);
2829 if (!tg3_flag(tp, 5705_PLUS)) {
2830 u32 newbits3;
2832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2833 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2834 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2835 CLOCK_CTRL_TXCLK_DISABLE |
2836 CLOCK_CTRL_44MHZ_CORE);
2837 } else {
2838 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2841 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2842 tp->pci_clock_ctrl | newbits3, 40);
2846 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2847 tg3_power_down_phy(tp, do_low_power);
2849 tg3_frob_aux_power(tp);
2851 /* Workaround for unstable PLL clock */
2852 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2853 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2854 u32 val = tr32(0x7d00);
2856 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2857 tw32(0x7d00, val);
2858 if (!tg3_flag(tp, ENABLE_ASF)) {
2859 int err;
2861 err = tg3_nvram_lock(tp);
2862 tg3_halt_cpu(tp, RX_CPU_BASE);
2863 if (!err)
2864 tg3_nvram_unlock(tp);
2868 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2870 return 0;
2873 static void tg3_power_down(struct tg3 *tp)
2874 {
2875 tg3_power_down_prepare(tp);
2877 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2878 pci_set_power_state(tp->pdev, PCI_D3hot);
2879 }
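/* Shutdown ordering sketch for the two functions above: prepare()
 * masks PCI interrupts, restores CLKREQ, parks the PHY and arms the
 * WoL state in NIC SRAM; only then does tg3_power_down() ask the PCI
 * core to enable PME (pci_wake_from_d3()) and drop the device into
 * D3hot.
 */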
2881 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2882 {
2883 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2884 case MII_TG3_AUX_STAT_10HALF:
2885 *speed = SPEED_10;
2886 *duplex = DUPLEX_HALF;
2887 break;
2889 case MII_TG3_AUX_STAT_10FULL:
2890 *speed = SPEED_10;
2891 *duplex = DUPLEX_FULL;
2892 break;
2894 case MII_TG3_AUX_STAT_100HALF:
2895 *speed = SPEED_100;
2896 *duplex = DUPLEX_HALF;
2897 break;
2899 case MII_TG3_AUX_STAT_100FULL:
2900 *speed = SPEED_100;
2901 *duplex = DUPLEX_FULL;
2902 break;
2904 case MII_TG3_AUX_STAT_1000HALF:
2905 *speed = SPEED_1000;
2906 *duplex = DUPLEX_HALF;
2907 break;
2909 case MII_TG3_AUX_STAT_1000FULL:
2910 *speed = SPEED_1000;
2911 *duplex = DUPLEX_FULL;
2912 break;
2914 default:
2915 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2916 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2917 SPEED_10;
2918 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2919 DUPLEX_HALF;
2920 break;
2921 }
2922 *speed = SPEED_INVALID;
2923 *duplex = DUPLEX_INVALID;
2924 break;
2925 }
2926 }
2928 static void tg3_phy_copper_begin(struct tg3 *tp)
2930 u32 new_adv;
2931 int i;
2933 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2934 /* Entering low power mode. Disable gigabit and
2935 * 100baseT advertisements.
2936 */
2937 tg3_writephy(tp, MII_TG3_CTRL, 0);
2939 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2940 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2941 if (tg3_flag(tp, WOL_SPEED_100MB))
2942 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2944 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2945 } else if (tp->link_config.speed == SPEED_INVALID) {
2946 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2947 tp->link_config.advertising &=
2948 ~(ADVERTISED_1000baseT_Half |
2949 ADVERTISED_1000baseT_Full);
2951 new_adv = ADVERTISE_CSMA;
2952 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2953 new_adv |= ADVERTISE_10HALF;
2954 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2955 new_adv |= ADVERTISE_10FULL;
2956 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2957 new_adv |= ADVERTISE_100HALF;
2958 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2959 new_adv |= ADVERTISE_100FULL;
2961 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2963 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2965 if (tp->link_config.advertising &
2966 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2967 new_adv = 0;
2968 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2969 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2970 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2971 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2972 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2973 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2974 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2975 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2976 MII_TG3_CTRL_ENABLE_AS_MASTER);
2977 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2978 } else {
2979 tg3_writephy(tp, MII_TG3_CTRL, 0);
2981 } else {
2982 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2983 new_adv |= ADVERTISE_CSMA;
2985 /* Asking for a specific link mode. */
2986 if (tp->link_config.speed == SPEED_1000) {
2987 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2989 if (tp->link_config.duplex == DUPLEX_FULL)
2990 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2991 else
2992 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2993 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2994 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2995 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2996 MII_TG3_CTRL_ENABLE_AS_MASTER);
2997 } else {
2998 if (tp->link_config.speed == SPEED_100) {
2999 if (tp->link_config.duplex == DUPLEX_FULL)
3000 new_adv |= ADVERTISE_100FULL;
3001 else
3002 new_adv |= ADVERTISE_100HALF;
3003 } else {
3004 if (tp->link_config.duplex == DUPLEX_FULL)
3005 new_adv |= ADVERTISE_10FULL;
3006 else
3007 new_adv |= ADVERTISE_10HALF;
3009 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3011 new_adv = 0;
3014 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
3017 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
3018 u32 val;
3020 tw32(TG3_CPMU_EEE_MODE,
3021 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3023 TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3025 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3026 case ASIC_REV_5717:
3027 case ASIC_REV_57765:
3028 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3029 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3030 MII_TG3_DSP_CH34TP2_HIBW01);
3031 /* Fall through */
3032 case ASIC_REV_5719:
3033 val = MII_TG3_DSP_TAP26_ALNOKO |
3034 MII_TG3_DSP_TAP26_RMRXSTO |
3035 MII_TG3_DSP_TAP26_OPCSINPT;
3036 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3039 val = 0;
3040 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3041 /* Advertise 100-BaseTX EEE ability */
3042 if (tp->link_config.advertising &
3043 ADVERTISED_100baseT_Full)
3044 val |= MDIO_AN_EEE_ADV_100TX;
3045 /* Advertise 1000-BaseT EEE ability */
3046 if (tp->link_config.advertising &
3047 ADVERTISED_1000baseT_Full)
3048 val |= MDIO_AN_EEE_ADV_1000T;
3050 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3052 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3055 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3056 tp->link_config.speed != SPEED_INVALID) {
3057 u32 bmcr, orig_bmcr;
3059 tp->link_config.active_speed = tp->link_config.speed;
3060 tp->link_config.active_duplex = tp->link_config.duplex;
3062 bmcr = 0;
3063 switch (tp->link_config.speed) {
3064 default:
3065 case SPEED_10:
3066 break;
3068 case SPEED_100:
3069 bmcr |= BMCR_SPEED100;
3070 break;
3072 case SPEED_1000:
3073 bmcr |= TG3_BMCR_SPEED1000;
3074 break;
3077 if (tp->link_config.duplex == DUPLEX_FULL)
3078 bmcr |= BMCR_FULLDPLX;
3080 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3081 (bmcr != orig_bmcr)) {
3082 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3083 for (i = 0; i < 1500; i++) {
3084 u32 tmp;
3086 udelay(10);
3087 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3088 tg3_readphy(tp, MII_BMSR, &tmp))
3089 continue;
3090 if (!(tmp & BMSR_LSTATUS)) {
3091 udelay(40);
3092 break;
3095 tg3_writephy(tp, MII_BMCR, bmcr);
3096 udelay(40);
3098 } else {
3099 tg3_writephy(tp, MII_BMCR,
3100 BMCR_ANENABLE | BMCR_ANRESTART);
3104 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3106 int err;
3108 /* Turn off tap power management. */
3109 /* Set Extended packet length bit */
3110 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3112 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3113 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3114 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3115 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3116 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3118 udelay(40);
3120 return err;
3123 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3125 u32 adv_reg, all_mask = 0;
3127 if (mask & ADVERTISED_10baseT_Half)
3128 all_mask |= ADVERTISE_10HALF;
3129 if (mask & ADVERTISED_10baseT_Full)
3130 all_mask |= ADVERTISE_10FULL;
3131 if (mask & ADVERTISED_100baseT_Half)
3132 all_mask |= ADVERTISE_100HALF;
3133 if (mask & ADVERTISED_100baseT_Full)
3134 all_mask |= ADVERTISE_100FULL;
3136 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3137 return 0;
3139 if ((adv_reg & all_mask) != all_mask)
3140 return 0;
3141 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3142 u32 tg3_ctrl;
3144 all_mask = 0;
3145 if (mask & ADVERTISED_1000baseT_Half)
3146 all_mask |= ADVERTISE_1000HALF;
3147 if (mask & ADVERTISED_1000baseT_Full)
3148 all_mask |= ADVERTISE_1000FULL;
3150 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3151 return 0;
3153 if ((tg3_ctrl & all_mask) != all_mask)
3154 return 0;
3155 }
3156 return 1;
3157 }
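/* tg3_copper_is_advertising_all() answers a single question: does
 * the PHY's live advertisement cover every mode in "mask"? The
 * 10/100 modes are checked against MII_ADVERTISE, the gigabit modes
 * against MII_TG3_CTRL (the 1000BASE-T control register); a readphy
 * failure or any missing bit returns 0 so the caller renegotiates.
 */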
3159 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3161 u32 curadv, reqadv;
3163 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3164 return 1;
3166 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3167 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3169 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3170 if (curadv != reqadv)
3171 return 0;
3173 if (tg3_flag(tp, PAUSE_AUTONEG))
3174 tg3_readphy(tp, MII_LPA, rmtadv);
3175 } else {
3176 /* Reprogram the advertisement register, even if it
3177 * does not affect the current link. If the link
3178 * gets renegotiated in the future, we can save an
3179 * additional renegotiation cycle by advertising
3180 * it correctly in the first place.
3181 */
3182 if (curadv != reqadv) {
3183 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3184 ADVERTISE_PAUSE_ASYM);
3185 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3189 return 1;
3192 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3194 int current_link_up;
3195 u32 bmsr, val;
3196 u32 lcl_adv, rmt_adv;
3197 u16 current_speed;
3198 u8 current_duplex;
3199 int i, err;
3201 tw32(MAC_EVENT, 0);
3203 tw32_f(MAC_STATUS,
3204 (MAC_STATUS_SYNC_CHANGED |
3205 MAC_STATUS_CFG_CHANGED |
3206 MAC_STATUS_MI_COMPLETION |
3207 MAC_STATUS_LNKSTATE_CHANGED));
3208 udelay(40);
3210 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3211 tw32_f(MAC_MI_MODE,
3212 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3213 udelay(80);
3216 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3218 /* Some third-party PHYs need to be reset on link going
3219 * down.
3220 */
3221 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3224 netif_carrier_ok(tp->dev)) {
3225 tg3_readphy(tp, MII_BMSR, &bmsr);
3226 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3227 !(bmsr & BMSR_LSTATUS))
3228 force_reset = 1;
3230 if (force_reset)
3231 tg3_phy_reset(tp);
3233 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3234 tg3_readphy(tp, MII_BMSR, &bmsr);
3235 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3236 !tg3_flag(tp, INIT_COMPLETE))
3237 bmsr = 0;
3239 if (!(bmsr & BMSR_LSTATUS)) {
3240 err = tg3_init_5401phy_dsp(tp);
3241 if (err)
3242 return err;
3244 tg3_readphy(tp, MII_BMSR, &bmsr);
3245 for (i = 0; i < 1000; i++) {
3246 udelay(10);
3247 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3248 (bmsr & BMSR_LSTATUS)) {
3249 udelay(40);
3250 break;
3254 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3255 TG3_PHY_REV_BCM5401_B0 &&
3256 !(bmsr & BMSR_LSTATUS) &&
3257 tp->link_config.active_speed == SPEED_1000) {
3258 err = tg3_phy_reset(tp);
3259 if (!err)
3260 err = tg3_init_5401phy_dsp(tp);
3261 if (err)
3262 return err;
3265 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3266 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3267 /* 5701 {A0,B0} CRC bug workaround */
3268 tg3_writephy(tp, 0x15, 0x0a75);
3269 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3270 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3271 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3274 /* Clear pending interrupts... */
3275 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3276 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3278 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3279 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3280 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3281 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3285 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3286 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3287 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3288 else
3289 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3292 current_link_up = 0;
3293 current_speed = SPEED_INVALID;
3294 current_duplex = DUPLEX_INVALID;
3296 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3297 err = tg3_phy_auxctl_read(tp,
3298 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3299 &val);
3300 if (!err && !(val & (1 << 10))) {
3301 tg3_phy_auxctl_write(tp,
3302 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3303 val | (1 << 10));
3304 goto relink;
3308 bmsr = 0;
3309 for (i = 0; i < 100; i++) {
3310 tg3_readphy(tp, MII_BMSR, &bmsr);
3311 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3312 (bmsr & BMSR_LSTATUS))
3313 break;
3314 udelay(40);
3317 if (bmsr & BMSR_LSTATUS) {
3318 u32 aux_stat, bmcr;
3320 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3321 for (i = 0; i < 2000; i++) {
3322 udelay(10);
3323 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3324 aux_stat)
3325 break;
3328 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3329 &current_speed,
3330 &current_duplex);
3332 bmcr = 0;
3333 for (i = 0; i < 200; i++) {
3334 tg3_readphy(tp, MII_BMCR, &bmcr);
3335 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3336 continue;
3337 if (bmcr && bmcr != 0x7fff)
3338 break;
3339 udelay(10);
3342 lcl_adv = 0;
3343 rmt_adv = 0;
3345 tp->link_config.active_speed = current_speed;
3346 tp->link_config.active_duplex = current_duplex;
3348 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3349 if ((bmcr & BMCR_ANENABLE) &&
3350 tg3_copper_is_advertising_all(tp,
3351 tp->link_config.advertising)) {
3352 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3353 &rmt_adv))
3354 current_link_up = 1;
3356 } else {
3357 if (!(bmcr & BMCR_ANENABLE) &&
3358 tp->link_config.speed == current_speed &&
3359 tp->link_config.duplex == current_duplex &&
3360 tp->link_config.flowctrl ==
3361 tp->link_config.active_flowctrl) {
3362 current_link_up = 1;
3366 if (current_link_up == 1 &&
3367 tp->link_config.active_duplex == DUPLEX_FULL)
3368 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3371 relink:
3372 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3373 tg3_phy_copper_begin(tp);
3375 tg3_readphy(tp, MII_BMSR, &bmsr);
3376 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3377 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3378 current_link_up = 1;
3381 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3382 if (current_link_up == 1) {
3383 if (tp->link_config.active_speed == SPEED_100 ||
3384 tp->link_config.active_speed == SPEED_10)
3385 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3386 else
3387 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3388 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3389 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3390 else
3391 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3393 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3394 if (tp->link_config.active_duplex == DUPLEX_HALF)
3395 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3398 if (current_link_up == 1 &&
3399 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3400 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3401 else
3402 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3405 /* ??? Without this setting Netgear GA302T PHY does not
3406 * ??? send/receive packets...
3407 */
3408 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3409 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3410 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3411 tw32_f(MAC_MI_MODE, tp->mi_mode);
3412 udelay(80);
3415 tw32_f(MAC_MODE, tp->mac_mode);
3416 udelay(40);
3418 tg3_phy_eee_adjust(tp, current_link_up);
3420 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3421 /* Polled via timer. */
3422 tw32_f(MAC_EVENT, 0);
3423 } else {
3424 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3426 udelay(40);
3428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3429 current_link_up == 1 &&
3430 tp->link_config.active_speed == SPEED_1000 &&
3431 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3432 udelay(120);
3433 tw32_f(MAC_STATUS,
3434 (MAC_STATUS_SYNC_CHANGED |
3435 MAC_STATUS_CFG_CHANGED));
3436 udelay(40);
3437 tg3_write_mem(tp,
3438 NIC_SRAM_FIRMWARE_MBOX,
3439 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3442 /* Prevent send BD corruption. */
3443 if (tg3_flag(tp, CLKREQ_BUG)) {
3444 u16 oldlnkctl, newlnkctl;
3446 pci_read_config_word(tp->pdev,
3447 tp->pcie_cap + PCI_EXP_LNKCTL,
3448 &oldlnkctl);
3449 if (tp->link_config.active_speed == SPEED_100 ||
3450 tp->link_config.active_speed == SPEED_10)
3451 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3452 else
3453 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3454 if (newlnkctl != oldlnkctl)
3455 pci_write_config_word(tp->pdev,
3456 tp->pcie_cap + PCI_EXP_LNKCTL,
3457 newlnkctl);
3460 if (current_link_up != netif_carrier_ok(tp->dev)) {
3461 if (current_link_up)
3462 netif_carrier_on(tp->dev);
3463 else
3464 netif_carrier_off(tp->dev);
3465 tg3_link_report(tp);
3468 return 0;
3469 }
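/* Flow recap for tg3_setup_copper_phy(): quiesce MAC events, reset
 * the PHY when forced or when a known-fragile PHY has lost link,
 * poll BMSR for link, decode speed/duplex from MII_TG3_AUX_STAT,
 * reprogram MAC_MODE (port mode, duplex, 5700 link polarity) to
 * match, and finally propagate any carrier change to the stack via
 * netif_carrier_{on,off}() and tg3_link_report().
 */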
3471 struct tg3_fiber_aneginfo {
3472 int state;
3473 #define ANEG_STATE_UNKNOWN 0
3474 #define ANEG_STATE_AN_ENABLE 1
3475 #define ANEG_STATE_RESTART_INIT 2
3476 #define ANEG_STATE_RESTART 3
3477 #define ANEG_STATE_DISABLE_LINK_OK 4
3478 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3479 #define ANEG_STATE_ABILITY_DETECT 6
3480 #define ANEG_STATE_ACK_DETECT_INIT 7
3481 #define ANEG_STATE_ACK_DETECT 8
3482 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3483 #define ANEG_STATE_COMPLETE_ACK 10
3484 #define ANEG_STATE_IDLE_DETECT_INIT 11
3485 #define ANEG_STATE_IDLE_DETECT 12
3486 #define ANEG_STATE_LINK_OK 13
3487 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3488 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3490 u32 flags;
3491 #define MR_AN_ENABLE 0x00000001
3492 #define MR_RESTART_AN 0x00000002
3493 #define MR_AN_COMPLETE 0x00000004
3494 #define MR_PAGE_RX 0x00000008
3495 #define MR_NP_LOADED 0x00000010
3496 #define MR_TOGGLE_TX 0x00000020
3497 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3498 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3499 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3500 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3501 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3502 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3503 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3504 #define MR_TOGGLE_RX 0x00002000
3505 #define MR_NP_RX 0x00004000
3507 #define MR_LINK_OK 0x80000000
3509 unsigned long link_time, cur_time;
3511 u32 ability_match_cfg;
3512 int ability_match_count;
3514 char ability_match, idle_match, ack_match;
3516 u32 txconfig, rxconfig;
3517 #define ANEG_CFG_NP 0x00000080
3518 #define ANEG_CFG_ACK 0x00000040
3519 #define ANEG_CFG_RF2 0x00000020
3520 #define ANEG_CFG_RF1 0x00000010
3521 #define ANEG_CFG_PS2 0x00000001
3522 #define ANEG_CFG_PS1 0x00008000
3523 #define ANEG_CFG_HD 0x00004000
3524 #define ANEG_CFG_FD 0x00002000
3525 #define ANEG_CFG_INVAL 0x00001f06
3528 #define ANEG_OK 0
3529 #define ANEG_DONE 1
3530 #define ANEG_TIMER_ENAB 2
3531 #define ANEG_FAILED -1
3533 #define ANEG_STATE_SETTLE_TIME 10000
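/* The software state machine below implements, in essence, the IEEE
 * 802.3 clause 37 (1000BASE-X) auto-negotiation arbitration diagram.
 * The normal forward path through the states defined above is:
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART
 *	  -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *	  -> ACK_DETECT_INIT -> ACK_DETECT
 *	  -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *	  -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * RESTART, COMPLETE_ACK and IDLE_DETECT each settle for
 * ANEG_STATE_SETTLE_TIME ticks; fiber_autoneg() below drives one
 * tick per microsecond, making that a 10 ms window. A mismatch in
 * the received config words drops the machine back to AN_ENABLE.
 */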
3535 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3536 struct tg3_fiber_aneginfo *ap)
3538 u16 flowctrl;
3539 unsigned long delta;
3540 u32 rx_cfg_reg;
3541 int ret;
3543 if (ap->state == ANEG_STATE_UNKNOWN) {
3544 ap->rxconfig = 0;
3545 ap->link_time = 0;
3546 ap->cur_time = 0;
3547 ap->ability_match_cfg = 0;
3548 ap->ability_match_count = 0;
3549 ap->ability_match = 0;
3550 ap->idle_match = 0;
3551 ap->ack_match = 0;
3553 ap->cur_time++;
3555 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3556 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3558 if (rx_cfg_reg != ap->ability_match_cfg) {
3559 ap->ability_match_cfg = rx_cfg_reg;
3560 ap->ability_match = 0;
3561 ap->ability_match_count = 0;
3562 } else {
3563 if (++ap->ability_match_count > 1) {
3564 ap->ability_match = 1;
3565 ap->ability_match_cfg = rx_cfg_reg;
3568 if (rx_cfg_reg & ANEG_CFG_ACK)
3569 ap->ack_match = 1;
3570 else
3571 ap->ack_match = 0;
3573 ap->idle_match = 0;
3574 } else {
3575 ap->idle_match = 1;
3576 ap->ability_match_cfg = 0;
3577 ap->ability_match_count = 0;
3578 ap->ability_match = 0;
3579 ap->ack_match = 0;
3581 rx_cfg_reg = 0;
3584 ap->rxconfig = rx_cfg_reg;
3585 ret = ANEG_OK;
3587 switch (ap->state) {
3588 case ANEG_STATE_UNKNOWN:
3589 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3590 ap->state = ANEG_STATE_AN_ENABLE;
3592 /* fallthru */
3593 case ANEG_STATE_AN_ENABLE:
3594 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3595 if (ap->flags & MR_AN_ENABLE) {
3596 ap->link_time = 0;
3597 ap->cur_time = 0;
3598 ap->ability_match_cfg = 0;
3599 ap->ability_match_count = 0;
3600 ap->ability_match = 0;
3601 ap->idle_match = 0;
3602 ap->ack_match = 0;
3604 ap->state = ANEG_STATE_RESTART_INIT;
3605 } else {
3606 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3608 break;
3610 case ANEG_STATE_RESTART_INIT:
3611 ap->link_time = ap->cur_time;
3612 ap->flags &= ~(MR_NP_LOADED);
3613 ap->txconfig = 0;
3614 tw32(MAC_TX_AUTO_NEG, 0);
3615 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3616 tw32_f(MAC_MODE, tp->mac_mode);
3617 udelay(40);
3619 ret = ANEG_TIMER_ENAB;
3620 ap->state = ANEG_STATE_RESTART;
3622 /* fallthru */
3623 case ANEG_STATE_RESTART:
3624 delta = ap->cur_time - ap->link_time;
3625 if (delta > ANEG_STATE_SETTLE_TIME)
3626 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3627 else
3628 ret = ANEG_TIMER_ENAB;
3629 break;
3631 case ANEG_STATE_DISABLE_LINK_OK:
3632 ret = ANEG_DONE;
3633 break;
3635 case ANEG_STATE_ABILITY_DETECT_INIT:
3636 ap->flags &= ~(MR_TOGGLE_TX);
3637 ap->txconfig = ANEG_CFG_FD;
3638 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3639 if (flowctrl & ADVERTISE_1000XPAUSE)
3640 ap->txconfig |= ANEG_CFG_PS1;
3641 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3642 ap->txconfig |= ANEG_CFG_PS2;
3643 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3644 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3645 tw32_f(MAC_MODE, tp->mac_mode);
3646 udelay(40);
3648 ap->state = ANEG_STATE_ABILITY_DETECT;
3649 break;
3651 case ANEG_STATE_ABILITY_DETECT:
3652 if (ap->ability_match != 0 && ap->rxconfig != 0)
3653 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3654 break;
3656 case ANEG_STATE_ACK_DETECT_INIT:
3657 ap->txconfig |= ANEG_CFG_ACK;
3658 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3659 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3660 tw32_f(MAC_MODE, tp->mac_mode);
3661 udelay(40);
3663 ap->state = ANEG_STATE_ACK_DETECT;
3665 /* fallthru */
3666 case ANEG_STATE_ACK_DETECT:
3667 if (ap->ack_match != 0) {
3668 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3669 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3670 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3671 } else {
3672 ap->state = ANEG_STATE_AN_ENABLE;
3674 } else if (ap->ability_match != 0 &&
3675 ap->rxconfig == 0) {
3676 ap->state = ANEG_STATE_AN_ENABLE;
3678 break;
3680 case ANEG_STATE_COMPLETE_ACK_INIT:
3681 if (ap->rxconfig & ANEG_CFG_INVAL) {
3682 ret = ANEG_FAILED;
3683 break;
3685 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3686 MR_LP_ADV_HALF_DUPLEX |
3687 MR_LP_ADV_SYM_PAUSE |
3688 MR_LP_ADV_ASYM_PAUSE |
3689 MR_LP_ADV_REMOTE_FAULT1 |
3690 MR_LP_ADV_REMOTE_FAULT2 |
3691 MR_LP_ADV_NEXT_PAGE |
3692 MR_TOGGLE_RX |
3693 MR_NP_RX);
3694 if (ap->rxconfig & ANEG_CFG_FD)
3695 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3696 if (ap->rxconfig & ANEG_CFG_HD)
3697 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3698 if (ap->rxconfig & ANEG_CFG_PS1)
3699 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3700 if (ap->rxconfig & ANEG_CFG_PS2)
3701 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3702 if (ap->rxconfig & ANEG_CFG_RF1)
3703 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3704 if (ap->rxconfig & ANEG_CFG_RF2)
3705 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3706 if (ap->rxconfig & ANEG_CFG_NP)
3707 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3709 ap->link_time = ap->cur_time;
3711 ap->flags ^= (MR_TOGGLE_TX);
3712 if (ap->rxconfig & 0x0008)
3713 ap->flags |= MR_TOGGLE_RX;
3714 if (ap->rxconfig & ANEG_CFG_NP)
3715 ap->flags |= MR_NP_RX;
3716 ap->flags |= MR_PAGE_RX;
3718 ap->state = ANEG_STATE_COMPLETE_ACK;
3719 ret = ANEG_TIMER_ENAB;
3720 break;
3722 case ANEG_STATE_COMPLETE_ACK:
3723 if (ap->ability_match != 0 &&
3724 ap->rxconfig == 0) {
3725 ap->state = ANEG_STATE_AN_ENABLE;
3726 break;
3728 delta = ap->cur_time - ap->link_time;
3729 if (delta > ANEG_STATE_SETTLE_TIME) {
3730 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3731 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3732 } else {
3733 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3734 !(ap->flags & MR_NP_RX)) {
3735 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3736 } else {
3737 ret = ANEG_FAILED;
3741 break;
3743 case ANEG_STATE_IDLE_DETECT_INIT:
3744 ap->link_time = ap->cur_time;
3745 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3746 tw32_f(MAC_MODE, tp->mac_mode);
3747 udelay(40);
3749 ap->state = ANEG_STATE_IDLE_DETECT;
3750 ret = ANEG_TIMER_ENAB;
3751 break;
3753 case ANEG_STATE_IDLE_DETECT:
3754 if (ap->ability_match != 0 &&
3755 ap->rxconfig == 0) {
3756 ap->state = ANEG_STATE_AN_ENABLE;
3757 break;
3759 delta = ap->cur_time - ap->link_time;
3760 if (delta > ANEG_STATE_SETTLE_TIME) {
3761 /* XXX another gem from the Broadcom driver :( */
3762 ap->state = ANEG_STATE_LINK_OK;
3764 break;
3766 case ANEG_STATE_LINK_OK:
3767 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3768 ret = ANEG_DONE;
3769 break;
3771 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3772 /* ??? unimplemented */
3773 break;
3775 case ANEG_STATE_NEXT_PAGE_WAIT:
3776 /* ??? unimplemented */
3777 break;
3779 default:
3780 ret = ANEG_FAILED;
3781 break;
3784 return ret;
3785 }
3787 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3789 int res = 0;
3790 struct tg3_fiber_aneginfo aninfo;
3791 int status = ANEG_FAILED;
3792 unsigned int tick;
3793 u32 tmp;
3795 tw32_f(MAC_TX_AUTO_NEG, 0);
3797 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3798 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3799 udelay(40);
3801 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3802 udelay(40);
3804 memset(&aninfo, 0, sizeof(aninfo));
3805 aninfo.flags |= MR_AN_ENABLE;
3806 aninfo.state = ANEG_STATE_UNKNOWN;
3807 aninfo.cur_time = 0;
3808 tick = 0;
3809 while (++tick < 195000) {
3810 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3811 if (status == ANEG_DONE || status == ANEG_FAILED)
3812 break;
3814 udelay(1);
3817 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3818 tw32_f(MAC_MODE, tp->mac_mode);
3819 udelay(40);
3821 *txflags = aninfo.txconfig;
3822 *rxflags = aninfo.flags;
3824 if (status == ANEG_DONE &&
3825 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3826 MR_LP_ADV_FULL_DUPLEX)))
3827 res = 1;
3829 return res;
3830 }
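/* Timing sketch: fiber_autoneg() steps the state machine once per
 * microsecond (udelay(1)), so the 195000-tick loop bounds the whole
 * negotiation at roughly 195 ms, of which each settle window above
 * consumes about 10 ms.
 */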
3832 static void tg3_init_bcm8002(struct tg3 *tp)
3834 u32 mac_status = tr32(MAC_STATUS);
3835 int i;
3837 /* Reset when initializing for the first time, or when we have a link. */
3838 if (tg3_flag(tp, INIT_COMPLETE) &&
3839 !(mac_status & MAC_STATUS_PCS_SYNCED))
3840 return;
3842 /* Set PLL lock range. */
3843 tg3_writephy(tp, 0x16, 0x8007);
3845 /* SW reset */
3846 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3848 /* Wait for reset to complete. */
3849 /* XXX schedule_timeout() ... */
3850 for (i = 0; i < 500; i++)
3851 udelay(10);
3853 /* Config mode; select PMA/Ch 1 regs. */
3854 tg3_writephy(tp, 0x10, 0x8411);
3856 /* Enable auto-lock and comdet, select txclk for tx. */
3857 tg3_writephy(tp, 0x11, 0x0a10);
3859 tg3_writephy(tp, 0x18, 0x00a0);
3860 tg3_writephy(tp, 0x16, 0x41ff);
3862 /* Assert and deassert POR. */
3863 tg3_writephy(tp, 0x13, 0x0400);
3864 udelay(40);
3865 tg3_writephy(tp, 0x13, 0x0000);
3867 tg3_writephy(tp, 0x11, 0x0a50);
3868 udelay(40);
3869 tg3_writephy(tp, 0x11, 0x0a10);
3871 /* Wait for signal to stabilize */
3872 /* XXX schedule_timeout() ... */
3873 for (i = 0; i < 15000; i++)
3874 udelay(10);
3876 /* Deselect the channel register so we can read the PHYID
3877 * later.
3878 */
3879 tg3_writephy(tp, 0x10, 0x8011);
3880 }
3882 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3884 u16 flowctrl;
3885 u32 sg_dig_ctrl, sg_dig_status;
3886 u32 serdes_cfg, expected_sg_dig_ctrl;
3887 int workaround, port_a;
3888 int current_link_up;
3890 serdes_cfg = 0;
3891 expected_sg_dig_ctrl = 0;
3892 workaround = 0;
3893 port_a = 1;
3894 current_link_up = 0;
3896 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3897 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3898 workaround = 1;
3899 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3900 port_a = 0;
3902 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3903 /* preserve bits 20-23 for voltage regulator */
3904 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3907 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3909 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3910 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3911 if (workaround) {
3912 u32 val = serdes_cfg;
3914 if (port_a)
3915 val |= 0xc010000;
3916 else
3917 val |= 0x4010000;
3918 tw32_f(MAC_SERDES_CFG, val);
3921 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3923 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3924 tg3_setup_flow_control(tp, 0, 0);
3925 current_link_up = 1;
3927 goto out;
3930 /* Want auto-negotiation. */
3931 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3933 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3934 if (flowctrl & ADVERTISE_1000XPAUSE)
3935 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3936 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3937 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3939 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3940 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3941 tp->serdes_counter &&
3942 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3943 MAC_STATUS_RCVD_CFG)) ==
3944 MAC_STATUS_PCS_SYNCED)) {
3945 tp->serdes_counter--;
3946 current_link_up = 1;
3947 goto out;
3949 restart_autoneg:
3950 if (workaround)
3951 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3952 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3953 udelay(5);
3954 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3956 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3957 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3958 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3959 MAC_STATUS_SIGNAL_DET)) {
3960 sg_dig_status = tr32(SG_DIG_STATUS);
3961 mac_status = tr32(MAC_STATUS);
3963 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3964 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3965 u32 local_adv = 0, remote_adv = 0;
3967 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3968 local_adv |= ADVERTISE_1000XPAUSE;
3969 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3970 local_adv |= ADVERTISE_1000XPSE_ASYM;
3972 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3973 remote_adv |= LPA_1000XPAUSE;
3974 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3975 remote_adv |= LPA_1000XPAUSE_ASYM;
3977 tg3_setup_flow_control(tp, local_adv, remote_adv);
3978 current_link_up = 1;
3979 tp->serdes_counter = 0;
3980 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3981 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3982 if (tp->serdes_counter)
3983 tp->serdes_counter--;
3984 else {
3985 if (workaround) {
3986 u32 val = serdes_cfg;
3988 if (port_a)
3989 val |= 0xc010000;
3990 else
3991 val |= 0x4010000;
3993 tw32_f(MAC_SERDES_CFG, val);
3996 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3997 udelay(40);
3999 /* Link parallel detection: the link is up only if we
4000 * have PCS_SYNC and are not receiving config code
4001 * words. */
4002 mac_status = tr32(MAC_STATUS);
4003 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4004 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4005 tg3_setup_flow_control(tp, 0, 0);
4006 current_link_up = 1;
4007 tp->phy_flags |=
4008 TG3_PHYFLG_PARALLEL_DETECT;
4009 tp->serdes_counter =
4010 SERDES_PARALLEL_DET_TIMEOUT;
4011 } else
4012 goto restart_autoneg;
4015 } else {
4016 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4017 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4020 out:
4021 return current_link_up;
4024 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4026 int current_link_up = 0;
4028 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4029 goto out;
4031 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4032 u32 txflags, rxflags;
4033 int i;
4035 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4036 u32 local_adv = 0, remote_adv = 0;
4038 if (txflags & ANEG_CFG_PS1)
4039 local_adv |= ADVERTISE_1000XPAUSE;
4040 if (txflags & ANEG_CFG_PS2)
4041 local_adv |= ADVERTISE_1000XPSE_ASYM;
4043 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4044 remote_adv |= LPA_1000XPAUSE;
4045 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4046 remote_adv |= LPA_1000XPAUSE_ASYM;
4048 tg3_setup_flow_control(tp, local_adv, remote_adv);
4050 current_link_up = 1;
4052 for (i = 0; i < 30; i++) {
4053 udelay(20);
4054 tw32_f(MAC_STATUS,
4055 (MAC_STATUS_SYNC_CHANGED |
4056 MAC_STATUS_CFG_CHANGED));
4057 udelay(40);
4058 if ((tr32(MAC_STATUS) &
4059 (MAC_STATUS_SYNC_CHANGED |
4060 MAC_STATUS_CFG_CHANGED)) == 0)
4061 break;
4064 mac_status = tr32(MAC_STATUS);
4065 if (current_link_up == 0 &&
4066 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4067 !(mac_status & MAC_STATUS_RCVD_CFG))
4068 current_link_up = 1;
4069 } else {
4070 tg3_setup_flow_control(tp, 0, 0);
4072 /* Forcing 1000FD link up. */
4073 current_link_up = 1;
4075 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4076 udelay(40);
4078 tw32_f(MAC_MODE, tp->mac_mode);
4079 udelay(40);
4082 out:
4083 return current_link_up;
4086 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4088 u32 orig_pause_cfg;
4089 u16 orig_active_speed;
4090 u8 orig_active_duplex;
4091 u32 mac_status;
4092 int current_link_up;
4093 int i;
4095 orig_pause_cfg = tp->link_config.active_flowctrl;
4096 orig_active_speed = tp->link_config.active_speed;
4097 orig_active_duplex = tp->link_config.active_duplex;
4099 if (!tg3_flag(tp, HW_AUTONEG) &&
4100 netif_carrier_ok(tp->dev) &&
4101 tg3_flag(tp, INIT_COMPLETE)) {
4102 mac_status = tr32(MAC_STATUS);
4103 mac_status &= (MAC_STATUS_PCS_SYNCED |
4104 MAC_STATUS_SIGNAL_DET |
4105 MAC_STATUS_CFG_CHANGED |
4106 MAC_STATUS_RCVD_CFG);
4107 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4108 MAC_STATUS_SIGNAL_DET)) {
4109 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4110 MAC_STATUS_CFG_CHANGED));
4111 return 0;
4115 tw32_f(MAC_TX_AUTO_NEG, 0);
4117 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4118 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4119 tw32_f(MAC_MODE, tp->mac_mode);
4120 udelay(40);
4122 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4123 tg3_init_bcm8002(tp);
4125 /* Enable link change event even when serdes polling. */
4126 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4127 udelay(40);
4129 current_link_up = 0;
4130 mac_status = tr32(MAC_STATUS);
4132 if (tg3_flag(tp, HW_AUTONEG))
4133 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4134 else
4135 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4137 tp->napi[0].hw_status->status =
4138 (SD_STATUS_UPDATED |
4139 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4141 for (i = 0; i < 100; i++) {
4142 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4143 MAC_STATUS_CFG_CHANGED));
4144 udelay(5);
4145 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4146 MAC_STATUS_CFG_CHANGED |
4147 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4148 break;
4151 mac_status = tr32(MAC_STATUS);
4152 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4153 current_link_up = 0;
4154 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4155 tp->serdes_counter == 0) {
4156 tw32_f(MAC_MODE, (tp->mac_mode |
4157 MAC_MODE_SEND_CONFIGS));
4158 udelay(1);
4159 tw32_f(MAC_MODE, tp->mac_mode);
4163 if (current_link_up == 1) {
4164 tp->link_config.active_speed = SPEED_1000;
4165 tp->link_config.active_duplex = DUPLEX_FULL;
4166 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4167 LED_CTRL_LNKLED_OVERRIDE |
4168 LED_CTRL_1000MBPS_ON));
4169 } else {
4170 tp->link_config.active_speed = SPEED_INVALID;
4171 tp->link_config.active_duplex = DUPLEX_INVALID;
4172 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173 LED_CTRL_LNKLED_OVERRIDE |
4174 LED_CTRL_TRAFFIC_OVERRIDE));
4177 if (current_link_up != netif_carrier_ok(tp->dev)) {
4178 if (current_link_up)
4179 netif_carrier_on(tp->dev);
4180 else
4181 netif_carrier_off(tp->dev);
4182 tg3_link_report(tp);
4183 } else {
4184 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4185 if (orig_pause_cfg != now_pause_cfg ||
4186 orig_active_speed != tp->link_config.active_speed ||
4187 orig_active_duplex != tp->link_config.active_duplex)
4188 tg3_link_report(tp);
4191 return 0;
4194 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4196 int current_link_up, err = 0;
4197 u32 bmsr, bmcr;
4198 u16 current_speed;
4199 u8 current_duplex;
4200 u32 local_adv, remote_adv;
4202 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4203 tw32_f(MAC_MODE, tp->mac_mode);
4204 udelay(40);
4206 tw32(MAC_EVENT, 0);
4208 tw32_f(MAC_STATUS,
4209 (MAC_STATUS_SYNC_CHANGED |
4210 MAC_STATUS_CFG_CHANGED |
4211 MAC_STATUS_MI_COMPLETION |
4212 MAC_STATUS_LNKSTATE_CHANGED));
4213 udelay(40);
4215 if (force_reset)
4216 tg3_phy_reset(tp);
4218 current_link_up = 0;
4219 current_speed = SPEED_INVALID;
4220 current_duplex = DUPLEX_INVALID;
4222 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4223 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4225 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4226 bmsr |= BMSR_LSTATUS;
4227 else
4228 bmsr &= ~BMSR_LSTATUS;
4231 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4233 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4234 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4235 /* do nothing, just check for link up at the end */
4236 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4237 u32 adv, new_adv;
4239 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4240 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4241 ADVERTISE_1000XPAUSE |
4242 ADVERTISE_1000XPSE_ASYM |
4243 ADVERTISE_SLCT);
4245 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4247 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4248 new_adv |= ADVERTISE_1000XHALF;
4249 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4250 new_adv |= ADVERTISE_1000XFULL;
4252 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4253 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4254 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4255 tg3_writephy(tp, MII_BMCR, bmcr);
4257 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4258 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4259 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4261 return err;
4263 } else {
4264 u32 new_bmcr;
4266 bmcr &= ~BMCR_SPEED1000;
4267 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4269 if (tp->link_config.duplex == DUPLEX_FULL)
4270 new_bmcr |= BMCR_FULLDPLX;
4272 if (new_bmcr != bmcr) {
4273 /* BMCR_SPEED1000 is a reserved bit that needs
4274 * to be set on write.
4276 new_bmcr |= BMCR_SPEED1000;
4278 /* Force a linkdown */
4279 if (netif_carrier_ok(tp->dev)) {
4280 u32 adv;
4282 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4283 adv &= ~(ADVERTISE_1000XFULL |
4284 ADVERTISE_1000XHALF |
4285 ADVERTISE_SLCT);
4286 tg3_writephy(tp, MII_ADVERTISE, adv);
4287 tg3_writephy(tp, MII_BMCR, bmcr |
4288 BMCR_ANRESTART |
4289 BMCR_ANENABLE);
4290 udelay(10);
4291 netif_carrier_off(tp->dev);
4293 tg3_writephy(tp, MII_BMCR, new_bmcr);
4294 bmcr = new_bmcr;
4295 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4296 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4297 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4298 ASIC_REV_5714) {
4299 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4300 bmsr |= BMSR_LSTATUS;
4301 else
4302 bmsr &= ~BMSR_LSTATUS;
4304 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4308 if (bmsr & BMSR_LSTATUS) {
4309 current_speed = SPEED_1000;
4310 current_link_up = 1;
4311 if (bmcr & BMCR_FULLDPLX)
4312 current_duplex = DUPLEX_FULL;
4313 else
4314 current_duplex = DUPLEX_HALF;
4316 local_adv = 0;
4317 remote_adv = 0;
4319 if (bmcr & BMCR_ANENABLE) {
4320 u32 common;
4322 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4323 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4324 common = local_adv & remote_adv;
4325 if (common & (ADVERTISE_1000XHALF |
4326 ADVERTISE_1000XFULL)) {
4327 if (common & ADVERTISE_1000XFULL)
4328 current_duplex = DUPLEX_FULL;
4329 else
4330 current_duplex = DUPLEX_HALF;
4331 } else if (!tg3_flag(tp, 5780_CLASS)) {
4332 /* Link is up via parallel detect */
4333 } else {
4334 current_link_up = 0;
4339 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4340 tg3_setup_flow_control(tp, local_adv, remote_adv);
4342 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4343 if (tp->link_config.active_duplex == DUPLEX_HALF)
4344 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4346 tw32_f(MAC_MODE, tp->mac_mode);
4347 udelay(40);
4349 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4351 tp->link_config.active_speed = current_speed;
4352 tp->link_config.active_duplex = current_duplex;
4354 if (current_link_up != netif_carrier_ok(tp->dev)) {
4355 if (current_link_up)
4356 netif_carrier_on(tp->dev);
4357 else {
4358 netif_carrier_off(tp->dev);
4359 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4361 tg3_link_report(tp);
4363 return err;
4366 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4368 if (tp->serdes_counter) {
4369 /* Give autoneg time to complete. */
4370 tp->serdes_counter--;
4371 return;
4374 if (!netif_carrier_ok(tp->dev) &&
4375 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4376 u32 bmcr;
4378 tg3_readphy(tp, MII_BMCR, &bmcr);
4379 if (bmcr & BMCR_ANENABLE) {
4380 u32 phy1, phy2;
4382 /* Select shadow register 0x1f */
4383 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4384 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4386 /* Select expansion interrupt status register */
4387 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4388 MII_TG3_DSP_EXP1_INT_STAT);
4389 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4390 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4392 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4393 /* We have signal detect and not receiving
4394 * config code words, link is up by parallel
4395 * detection.
4398 bmcr &= ~BMCR_ANENABLE;
4399 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4400 tg3_writephy(tp, MII_BMCR, bmcr);
4401 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4404 } else if (netif_carrier_ok(tp->dev) &&
4405 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4406 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4407 u32 phy2;
4409 /* Select expansion interrupt status register */
4410 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4411 MII_TG3_DSP_EXP1_INT_STAT);
4412 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4413 if (phy2 & 0x20) {
4414 u32 bmcr;
4416 /* Config code words received, turn on autoneg. */
4417 tg3_readphy(tp, MII_BMCR, &bmcr);
4418 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4420 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4426 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4428 u32 val;
4429 int err;
4431 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4432 err = tg3_setup_fiber_phy(tp, force_reset);
4433 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4434 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4435 else
4436 err = tg3_setup_copper_phy(tp, force_reset);
4438 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4439 u32 scale;
4441 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4442 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4443 scale = 65;
4444 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4445 scale = 6;
4446 else
4447 scale = 12;
4449 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4450 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4451 tw32(GRC_MISC_CFG, val);
4454 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4455 (6 << TX_LENGTHS_IPG_SHIFT);
4456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4457 val |= tr32(MAC_TX_LENGTHS) &
4458 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4459 TX_LENGTHS_CNT_DWN_VAL_MSK);
4461 if (tp->link_config.active_speed == SPEED_1000 &&
4462 tp->link_config.active_duplex == DUPLEX_HALF)
4463 tw32(MAC_TX_LENGTHS, val |
4464 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4465 else
4466 tw32(MAC_TX_LENGTHS, val |
4467 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4469 if (!tg3_flag(tp, 5705_PLUS)) {
4470 if (netif_carrier_ok(tp->dev)) {
4471 tw32(HOSTCC_STAT_COAL_TICKS,
4472 tp->coal.stats_block_coalesce_usecs);
4473 } else {
4474 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4478 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4479 val = tr32(PCIE_PWR_MGMT_THRESH);
4480 if (!netif_carrier_ok(tp->dev))
4481 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4482 tp->pwrmgmt_thresh;
4483 else
4484 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4485 tw32(PCIE_PWR_MGMT_THRESH, val);
4488 return err;
4491 static inline int tg3_irq_sync(struct tg3 *tp)
4493 return tp->irq_sync;
4496 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4498 int i;
4500 dst = (u32 *)((u8 *)dst + off);
4501 for (i = 0; i < len; i += sizeof(u32))
4502 *dst++ = tr32(off + i);
4505 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4507 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4508 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4509 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4510 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4511 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4512 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4513 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4514 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4515 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4516 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4517 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4518 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4519 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4520 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4521 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4522 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4523 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4524 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4525 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4527 if (tg3_flag(tp, SUPPORT_MSIX))
4528 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4530 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4531 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4532 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4533 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4534 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4535 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4536 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4537 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4539 if (!tg3_flag(tp, 5705_PLUS)) {
4540 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4541 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4542 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4545 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4546 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4547 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4548 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4549 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4551 if (tg3_flag(tp, NVRAM))
4552 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4555 static void tg3_dump_state(struct tg3 *tp)
4557 int i;
4558 u32 *regs;
4560 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4561 if (!regs) {
4562 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4563 return;
4566 if (tg3_flag(tp, PCI_EXPRESS)) {
4567 /* Read up to but not including private PCI registers */
4568 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4569 regs[i / sizeof(u32)] = tr32(i);
4570 } else
4571 tg3_dump_legacy_regs(tp, regs);
4573 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4574 if (!regs[i + 0] && !regs[i + 1] &&
4575 !regs[i + 2] && !regs[i + 3])
4576 continue;
4578 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4579 i * 4,
4580 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4583 kfree(regs);
4585 for (i = 0; i < tp->irq_cnt; i++) {
4586 struct tg3_napi *tnapi = &tp->napi[i];
4588 /* SW status block */
4589 netdev_err(tp->dev,
4590 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4592 tnapi->hw_status->status,
4593 tnapi->hw_status->status_tag,
4594 tnapi->hw_status->rx_jumbo_consumer,
4595 tnapi->hw_status->rx_consumer,
4596 tnapi->hw_status->rx_mini_consumer,
4597 tnapi->hw_status->idx[0].rx_producer,
4598 tnapi->hw_status->idx[0].tx_consumer);
4600 netdev_err(tp->dev,
4601 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4603 tnapi->last_tag, tnapi->last_irq_tag,
4604 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4605 tnapi->rx_rcb_ptr,
4606 tnapi->prodring.rx_std_prod_idx,
4607 tnapi->prodring.rx_std_cons_idx,
4608 tnapi->prodring.rx_jmb_prod_idx,
4609 tnapi->prodring.rx_jmb_cons_idx);
4613 /* This is called whenever we suspect that the system chipset is re-
4614 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4615 * is bogus tx completions. We try to recover by setting the
4616 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4617 * in the workqueue.
4619 static void tg3_tx_recover(struct tg3 *tp)
4621 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4622 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4624 netdev_warn(tp->dev,
4625 "The system may be re-ordering memory-mapped I/O "
4626 "cycles to the network device, attempting to recover. "
4627 "Please report the problem to the driver maintainer "
4628 "and include system chipset information.\n");
4630 spin_lock(&tp->lock);
4631 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4632 spin_unlock(&tp->lock);
4635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4637 /* Tell compiler to fetch tx indices from memory. */
4638 barrier();
4639 return tnapi->tx_pending -
4640 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
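/* Example of the ring arithmetic above, assuming the usual
 * TG3_TX_RING_SIZE of 512: with tx_prod = 5 and tx_cons = 510 the
 * producer has wrapped past the consumer, (5 - 510) & 511 = 7
 * descriptors are in flight, and tx_pending - 7 slots remain free.
 */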
4643 /* Tigon3 never reports partial packet sends. So we do not
4644 * need special logic to handle SKBs that have not had all
4645 * of their frags sent yet, like SunGEM does.
4647 static void tg3_tx(struct tg3_napi *tnapi)
4649 struct tg3 *tp = tnapi->tp;
4650 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4651 u32 sw_idx = tnapi->tx_cons;
4652 struct netdev_queue *txq;
4653 int index = tnapi - tp->napi;
4655 if (tg3_flag(tp, ENABLE_TSS))
4656 index--;
4658 txq = netdev_get_tx_queue(tp->dev, index);
4660 while (sw_idx != hw_idx) {
4661 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4662 struct sk_buff *skb = ri->skb;
4663 int i, tx_bug = 0;
4665 if (unlikely(skb == NULL)) {
4666 tg3_tx_recover(tp);
4667 return;
4670 pci_unmap_single(tp->pdev,
4671 dma_unmap_addr(ri, mapping),
4672 skb_headlen(skb),
4673 PCI_DMA_TODEVICE);
4675 ri->skb = NULL;
4677 sw_idx = NEXT_TX(sw_idx);
4679 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4680 ri = &tnapi->tx_buffers[sw_idx];
4681 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4682 tx_bug = 1;
4684 pci_unmap_page(tp->pdev,
4685 dma_unmap_addr(ri, mapping),
4686 skb_shinfo(skb)->frags[i].size,
4687 PCI_DMA_TODEVICE);
4688 sw_idx = NEXT_TX(sw_idx);
4691 dev_kfree_skb(skb);
4693 if (unlikely(tx_bug)) {
4694 tg3_tx_recover(tp);
4695 return;
4699 tnapi->tx_cons = sw_idx;
4701 /* Need to make the tx_cons update visible to tg3_start_xmit()
4702 * before checking for netif_queue_stopped(). Without the
4703 * memory barrier, there is a small possibility that tg3_start_xmit()
4704 * will miss it and cause the queue to be stopped forever.
4706 smp_mb();
4708 if (unlikely(netif_tx_queue_stopped(txq) &&
4709 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4710 __netif_tx_lock(txq, smp_processor_id());
4711 if (netif_tx_queue_stopped(txq) &&
4712 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4713 netif_tx_wake_queue(txq);
4714 __netif_tx_unlock(txq);
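/* The smp_mb() above pairs with the one after netif_tx_stop_queue()
 * in tg3_start_xmit().  An illustrative sketch of the race being
 * closed:
 *
 *   xmit path                        completion path (tg3_tx)
 *   ---------                        ------------------------
 *   netif_tx_stop_queue(txq);        tnapi->tx_cons = sw_idx;
 *   smp_mb();                        smp_mb();
 *   if (avail > thresh)              if (stopped && avail > thresh)
 *           wake queue;                      wake queue;
 *
 * Without the barriers one side could see the old tx_cons while the
 * other has not yet observed the stopped queue, losing the wake-up.
 */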
4718 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4720 if (!ri->skb)
4721 return;
4723 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4724 map_sz, PCI_DMA_FROMDEVICE);
4725 dev_kfree_skb_any(ri->skb);
4726 ri->skb = NULL;
4729 /* Returns size of skb allocated or < 0 on error.
4731 * We only need to fill in the address because the other members
4732 * of the RX descriptor are invariant, see tg3_init_rings.
4734 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4735 * posting buffers we only dirty the first cache line of the RX
4736 * descriptor (containing the address). Whereas for the RX status
4737 * buffers the cpu only reads the last cacheline of the RX descriptor
4738 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4740 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4741 u32 opaque_key, u32 dest_idx_unmasked)
4743 struct tg3_rx_buffer_desc *desc;
4744 struct ring_info *map;
4745 struct sk_buff *skb;
4746 dma_addr_t mapping;
4747 int skb_size, dest_idx;
4749 switch (opaque_key) {
4750 case RXD_OPAQUE_RING_STD:
4751 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4752 desc = &tpr->rx_std[dest_idx];
4753 map = &tpr->rx_std_buffers[dest_idx];
4754 skb_size = tp->rx_pkt_map_sz;
4755 break;
4757 case RXD_OPAQUE_RING_JUMBO:
4758 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4759 desc = &tpr->rx_jmb[dest_idx].std;
4760 map = &tpr->rx_jmb_buffers[dest_idx];
4761 skb_size = TG3_RX_JMB_MAP_SZ;
4762 break;
4764 default:
4765 return -EINVAL;
4768 /* Do not overwrite any of the map or rp information
4769 * until we are sure we can commit to a new buffer.
4771 * Callers depend upon this behavior and assume that
4772 * we leave everything unchanged if we fail.
4774 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4775 if (skb == NULL)
4776 return -ENOMEM;
4778 skb_reserve(skb, tp->rx_offset);
4780 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4781 PCI_DMA_FROMDEVICE);
4782 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4783 dev_kfree_skb(skb);
4784 return -EIO;
4787 map->skb = skb;
4788 dma_unmap_addr_set(map, mapping, mapping);
4790 desc->addr_hi = ((u64)mapping >> 32);
4791 desc->addr_lo = ((u64)mapping & 0xffffffff);
4793 return skb_size;
4796 /* We only need to copy the address over because the other
4797 * members of the RX descriptor are invariant. See notes above
4798 * tg3_alloc_rx_skb for full details.
4800 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4801 struct tg3_rx_prodring_set *dpr,
4802 u32 opaque_key, int src_idx,
4803 u32 dest_idx_unmasked)
4805 struct tg3 *tp = tnapi->tp;
4806 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4807 struct ring_info *src_map, *dest_map;
4808 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4809 int dest_idx;
4811 switch (opaque_key) {
4812 case RXD_OPAQUE_RING_STD:
4813 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4814 dest_desc = &dpr->rx_std[dest_idx];
4815 dest_map = &dpr->rx_std_buffers[dest_idx];
4816 src_desc = &spr->rx_std[src_idx];
4817 src_map = &spr->rx_std_buffers[src_idx];
4818 break;
4820 case RXD_OPAQUE_RING_JUMBO:
4821 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4822 dest_desc = &dpr->rx_jmb[dest_idx].std;
4823 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4824 src_desc = &spr->rx_jmb[src_idx].std;
4825 src_map = &spr->rx_jmb_buffers[src_idx];
4826 break;
4828 default:
4829 return;
4832 dest_map->skb = src_map->skb;
4833 dma_unmap_addr_set(dest_map, mapping,
4834 dma_unmap_addr(src_map, mapping));
4835 dest_desc->addr_hi = src_desc->addr_hi;
4836 dest_desc->addr_lo = src_desc->addr_lo;
4838 /* Ensure that the update to the skb happens after the physical
4839 * addresses have been transferred to the new BD location.
4841 smp_wmb();
4843 src_map->skb = NULL;
4846 /* The RX ring scheme is composed of multiple rings which post fresh
4847 * buffers to the chip, and one special ring the chip uses to report
4848 * status back to the host.
4850 * The special ring reports the status of received packets to the
4851 * host. The chip does not write into the original descriptor the
4852 * RX buffer was obtained from. The chip simply takes the original
4853 * descriptor as provided by the host, updates the status and length
4854 * field, then writes this into the next status ring entry.
4856 * Each ring the host uses to post buffers to the chip is described
4857 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4858 * it is first placed into the on-chip ram. When the packet's length
4859 * is known, it walks down the TG3_BDINFO entries to select the ring.
4860 * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
4861 * whose MAXLEN covers the new packet's length is chosen.
4863 * The "separate ring for rx status" scheme may sound queer, but it makes
4864 * sense from a cache coherency perspective. If only the host writes
4865 * to the buffer post rings, and only the chip writes to the rx status
4866 * rings, then cache lines never move beyond shared-modified state.
4867 * If both the host and chip were to write into the same ring, cache line
4868 * eviction could occur since both entities want it in an exclusive state.
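/* A rough picture of the scheme described above (illustrative only):
 *
 *   host --posts buffers--> std/jumbo producer rings --reads--> chip
 *   host <--reads status--- rx return (status) ring <--writes-- chip
 *
 * Each ring has exactly one writer, so its cache lines only ever move
 * between one producer and one consumer.
 */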
4870 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4872 struct tg3 *tp = tnapi->tp;
4873 u32 work_mask, rx_std_posted = 0;
4874 u32 std_prod_idx, jmb_prod_idx;
4875 u32 sw_idx = tnapi->rx_rcb_ptr;
4876 u16 hw_idx;
4877 int received;
4878 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4880 hw_idx = *(tnapi->rx_rcb_prod_idx);
4882 * We need to order the read of hw_idx and the read of
4883 * the opaque cookie.
4885 rmb();
4886 work_mask = 0;
4887 received = 0;
4888 std_prod_idx = tpr->rx_std_prod_idx;
4889 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4890 while (sw_idx != hw_idx && budget > 0) {
4891 struct ring_info *ri;
4892 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4893 unsigned int len;
4894 struct sk_buff *skb;
4895 dma_addr_t dma_addr;
4896 u32 opaque_key, desc_idx, *post_ptr;
4898 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4899 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4900 if (opaque_key == RXD_OPAQUE_RING_STD) {
4901 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4902 dma_addr = dma_unmap_addr(ri, mapping);
4903 skb = ri->skb;
4904 post_ptr = &std_prod_idx;
4905 rx_std_posted++;
4906 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4907 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4908 dma_addr = dma_unmap_addr(ri, mapping);
4909 skb = ri->skb;
4910 post_ptr = &jmb_prod_idx;
4911 } else
4912 goto next_pkt_nopost;
4914 work_mask |= opaque_key;
4916 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4917 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4918 drop_it:
4919 tg3_recycle_rx(tnapi, tpr, opaque_key,
4920 desc_idx, *post_ptr);
4921 drop_it_no_recycle:
4922 /* Other statistics kept track of by card. */
4923 tp->rx_dropped++;
4924 goto next_pkt;
4927 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4928 ETH_FCS_LEN;
4930 if (len > TG3_RX_COPY_THRESH(tp)) {
4931 int skb_size;
4933 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4934 *post_ptr);
4935 if (skb_size < 0)
4936 goto drop_it;
4938 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4939 PCI_DMA_FROMDEVICE);
4941 /* Ensure that the update to the skb happens
4942 * after the usage of the old DMA mapping.
4944 smp_wmb();
4946 ri->skb = NULL;
4948 skb_put(skb, len);
4949 } else {
4950 struct sk_buff *copy_skb;
4952 tg3_recycle_rx(tnapi, tpr, opaque_key,
4953 desc_idx, *post_ptr);
4955 copy_skb = netdev_alloc_skb(tp->dev, len +
4956 TG3_RAW_IP_ALIGN);
4957 if (copy_skb == NULL)
4958 goto drop_it_no_recycle;
4960 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4961 skb_put(copy_skb, len);
4962 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4963 skb_copy_from_linear_data(skb, copy_skb->data, len);
4964 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4966 /* We'll reuse the original ring buffer. */
4967 skb = copy_skb;
4970 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4971 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4972 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4973 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4974 skb->ip_summed = CHECKSUM_UNNECESSARY;
4975 else
4976 skb_checksum_none_assert(skb);
4978 skb->protocol = eth_type_trans(skb, tp->dev);
4980 if (len > (tp->dev->mtu + ETH_HLEN) &&
4981 skb->protocol != htons(ETH_P_8021Q)) {
4982 dev_kfree_skb(skb);
4983 goto drop_it_no_recycle;
4986 if (desc->type_flags & RXD_FLAG_VLAN &&
4987 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4988 __vlan_hwaccel_put_tag(skb,
4989 desc->err_vlan & RXD_VLAN_MASK);
4991 napi_gro_receive(&tnapi->napi, skb);
4993 received++;
4994 budget--;
4996 next_pkt:
4997 (*post_ptr)++;
4999 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5000 tpr->rx_std_prod_idx = std_prod_idx &
5001 tp->rx_std_ring_mask;
5002 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5003 tpr->rx_std_prod_idx);
5004 work_mask &= ~RXD_OPAQUE_RING_STD;
5005 rx_std_posted = 0;
5007 next_pkt_nopost:
5008 sw_idx++;
5009 sw_idx &= tp->rx_ret_ring_mask;
5011 /* Refresh hw_idx to see if there is new work */
5012 if (sw_idx == hw_idx) {
5013 hw_idx = *(tnapi->rx_rcb_prod_idx);
5014 rmb();
5018 /* ACK the status ring. */
5019 tnapi->rx_rcb_ptr = sw_idx;
5020 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5022 /* Refill RX ring(s). */
5023 if (!tg3_flag(tp, ENABLE_RSS)) {
5024 if (work_mask & RXD_OPAQUE_RING_STD) {
5025 tpr->rx_std_prod_idx = std_prod_idx &
5026 tp->rx_std_ring_mask;
5027 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5028 tpr->rx_std_prod_idx);
5030 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5031 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5032 tp->rx_jmb_ring_mask;
5033 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5034 tpr->rx_jmb_prod_idx);
5036 mmiowb();
5037 } else if (work_mask) {
5038 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5039 * updated before the producer indices can be updated.
5041 smp_wmb();
5043 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5044 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5046 if (tnapi != &tp->napi[1])
5047 napi_schedule(&tp->napi[1].napi);
5050 return received;
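/* Note on the receive path above: frames larger than
 * TG3_RX_COPY_THRESH(tp) keep their DMA buffer (a fresh one is
 * allocated and posted in its place), while smaller frames are copied
 * into a new skb and the original buffer is recycled via
 * tg3_recycle_rx().  This is the usual "copybreak" trade-off: copying
 * a few hundred bytes is cheaper than mapping a full-size replacement
 * buffer for every small frame.
 */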
5053 static void tg3_poll_link(struct tg3 *tp)
5055 /* handle link change and other phy events */
5056 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5057 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5059 if (sblk->status & SD_STATUS_LINK_CHG) {
5060 sblk->status = SD_STATUS_UPDATED |
5061 (sblk->status & ~SD_STATUS_LINK_CHG);
5062 spin_lock(&tp->lock);
5063 if (tg3_flag(tp, USE_PHYLIB)) {
5064 tw32_f(MAC_STATUS,
5065 (MAC_STATUS_SYNC_CHANGED |
5066 MAC_STATUS_CFG_CHANGED |
5067 MAC_STATUS_MI_COMPLETION |
5068 MAC_STATUS_LNKSTATE_CHANGED));
5069 udelay(40);
5070 } else
5071 tg3_setup_phy(tp, 0);
5072 spin_unlock(&tp->lock);
5077 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5078 struct tg3_rx_prodring_set *dpr,
5079 struct tg3_rx_prodring_set *spr)
5081 u32 si, di, cpycnt, src_prod_idx;
5082 int i, err = 0;
5084 while (1) {
5085 src_prod_idx = spr->rx_std_prod_idx;
5087 /* Make sure updates to the rx_std_buffers[] entries and the
5088 * standard producer index are seen in the correct order.
5090 smp_rmb();
5092 if (spr->rx_std_cons_idx == src_prod_idx)
5093 break;
5095 if (spr->rx_std_cons_idx < src_prod_idx)
5096 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5097 else
5098 cpycnt = tp->rx_std_ring_mask + 1 -
5099 spr->rx_std_cons_idx;
5101 cpycnt = min(cpycnt,
5102 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5104 si = spr->rx_std_cons_idx;
5105 di = dpr->rx_std_prod_idx;
5107 for (i = di; i < di + cpycnt; i++) {
5108 if (dpr->rx_std_buffers[i].skb) {
5109 cpycnt = i - di;
5110 err = -ENOSPC;
5111 break;
5115 if (!cpycnt)
5116 break;
5118 /* Ensure that updates to the rx_std_buffers ring and the
5119 * shadowed hardware producer ring from tg3_recycle_skb() are
5120 * ordered correctly WRT the skb check above.
5122 smp_rmb();
5124 memcpy(&dpr->rx_std_buffers[di],
5125 &spr->rx_std_buffers[si],
5126 cpycnt * sizeof(struct ring_info));
5128 for (i = 0; i < cpycnt; i++, di++, si++) {
5129 struct tg3_rx_buffer_desc *sbd, *dbd;
5130 sbd = &spr->rx_std[si];
5131 dbd = &dpr->rx_std[di];
5132 dbd->addr_hi = sbd->addr_hi;
5133 dbd->addr_lo = sbd->addr_lo;
5136 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5137 tp->rx_std_ring_mask;
5138 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5139 tp->rx_std_ring_mask;
5142 while (1) {
5143 src_prod_idx = spr->rx_jmb_prod_idx;
5145 /* Make sure updates to the rx_jmb_buffers[] entries and
5146 * the jumbo producer index are seen in the correct order.
5148 smp_rmb();
5150 if (spr->rx_jmb_cons_idx == src_prod_idx)
5151 break;
5153 if (spr->rx_jmb_cons_idx < src_prod_idx)
5154 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5155 else
5156 cpycnt = tp->rx_jmb_ring_mask + 1 -
5157 spr->rx_jmb_cons_idx;
5159 cpycnt = min(cpycnt,
5160 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5162 si = spr->rx_jmb_cons_idx;
5163 di = dpr->rx_jmb_prod_idx;
5165 for (i = di; i < di + cpycnt; i++) {
5166 if (dpr->rx_jmb_buffers[i].skb) {
5167 cpycnt = i - di;
5168 err = -ENOSPC;
5169 break;
5173 if (!cpycnt)
5174 break;
5176 /* Ensure that updates to the rx_jmb_buffers ring and the
5177 * shadowed hardware producer ring from tg3_recycle_skb() are
5178 * ordered correctly WRT the skb check above.
5180 smp_rmb();
5182 memcpy(&dpr->rx_jmb_buffers[di],
5183 &spr->rx_jmb_buffers[si],
5184 cpycnt * sizeof(struct ring_info));
5186 for (i = 0; i < cpycnt; i++, di++, si++) {
5187 struct tg3_rx_buffer_desc *sbd, *dbd;
5188 sbd = &spr->rx_jmb[si].std;
5189 dbd = &dpr->rx_jmb[di].std;
5190 dbd->addr_hi = sbd->addr_hi;
5191 dbd->addr_lo = sbd->addr_lo;
5194 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5195 tp->rx_jmb_ring_mask;
5196 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5197 tp->rx_jmb_ring_mask;
5200 return err;
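/* Worked example of the cpycnt clamping above (values illustrative):
 * with rx_std_ring_mask = 511, rx_std_cons_idx = 508 and
 * src_prod_idx = 4, the consumer index is numerically larger, so
 * cpycnt = 512 - 508 = 4 and entries 508..511 are copied first; the
 * next iteration of the while loop restarts at index 0 and moves the
 * remaining four entries.  Each memcpy() is therefore always
 * contiguous and never needs to handle the ring wrap itself.
 */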
5203 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5205 struct tg3 *tp = tnapi->tp;
5207 /* run TX completion thread */
5208 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5209 tg3_tx(tnapi);
5210 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5211 return work_done;
5214 /* run RX thread, within the bounds set by NAPI.
5215 * All RX "locking" is done by ensuring outside
5216 * code synchronizes with tg3->napi.poll()
5218 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5219 work_done += tg3_rx(tnapi, budget - work_done);
5221 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5222 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5223 int i, err = 0;
5224 u32 std_prod_idx = dpr->rx_std_prod_idx;
5225 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5227 for (i = 1; i < tp->irq_cnt; i++)
5228 err |= tg3_rx_prodring_xfer(tp, dpr,
5229 &tp->napi[i].prodring);
5231 wmb();
5233 if (std_prod_idx != dpr->rx_std_prod_idx)
5234 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5235 dpr->rx_std_prod_idx);
5237 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5238 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5239 dpr->rx_jmb_prod_idx);
5241 mmiowb();
5243 if (err)
5244 tw32_f(HOSTCC_MODE, tp->coal_now);
5247 return work_done;
5250 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5252 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5253 struct tg3 *tp = tnapi->tp;
5254 int work_done = 0;
5255 struct tg3_hw_status *sblk = tnapi->hw_status;
5257 while (1) {
5258 work_done = tg3_poll_work(tnapi, work_done, budget);
5260 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5261 goto tx_recovery;
5263 if (unlikely(work_done >= budget))
5264 break;
5266 /* tnapi->last_tag is used in the interrupt re-enable
5267 * write below to tell the hw how much work has been
5268 * processed, so we must read it before checking for more work.
5270 tnapi->last_tag = sblk->status_tag;
5271 tnapi->last_irq_tag = tnapi->last_tag;
5272 rmb();
5274 /* check for RX/TX work to do */
5275 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5276 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5277 napi_complete(napi);
5278 /* Reenable interrupts. */
5279 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5280 mmiowb();
5281 break;
5285 return work_done;
5287 tx_recovery:
5288 /* work_done is guaranteed to be less than budget. */
5289 napi_complete(napi);
5290 schedule_work(&tp->reset_task);
5291 return work_done;
5294 static void tg3_process_error(struct tg3 *tp)
5296 u32 val;
5297 bool real_error = false;
5299 if (tg3_flag(tp, ERROR_PROCESSED))
5300 return;
5302 /* Check Flow Attention register */
5303 val = tr32(HOSTCC_FLOW_ATTN);
5304 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5305 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5306 real_error = true;
5309 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5310 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5311 real_error = true;
5314 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5315 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5316 real_error = true;
5319 if (!real_error)
5320 return;
5322 tg3_dump_state(tp);
5324 tg3_flag_set(tp, ERROR_PROCESSED);
5325 schedule_work(&tp->reset_task);
5328 static int tg3_poll(struct napi_struct *napi, int budget)
5330 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5331 struct tg3 *tp = tnapi->tp;
5332 int work_done = 0;
5333 struct tg3_hw_status *sblk = tnapi->hw_status;
5335 while (1) {
5336 if (sblk->status & SD_STATUS_ERROR)
5337 tg3_process_error(tp);
5339 tg3_poll_link(tp);
5341 work_done = tg3_poll_work(tnapi, work_done, budget);
5343 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5344 goto tx_recovery;
5346 if (unlikely(work_done >= budget))
5347 break;
5349 if (tg3_flag(tp, TAGGED_STATUS)) {
5350 /* tnapi->last_tag is used in tg3_int_reenable() below
5351 * to tell the hw how much work has been processed,
5352 * so we must read it before checking for more work.
5354 tnapi->last_tag = sblk->status_tag;
5355 tnapi->last_irq_tag = tnapi->last_tag;
5356 rmb();
5357 } else
5358 sblk->status &= ~SD_STATUS_UPDATED;
5360 if (likely(!tg3_has_work(tnapi))) {
5361 napi_complete(napi);
5362 tg3_int_reenable(tnapi);
5363 break;
5367 return work_done;
5369 tx_recovery:
5370 /* work_done is guaranteed to be less than budget. */
5371 napi_complete(napi);
5372 schedule_work(&tp->reset_task);
5373 return work_done;
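/* Both poll routines above follow the standard NAPI contract; a
 * minimal sketch (process_work() and reenable_irqs() stand in for the
 * driver-specific pieces):
 *
 *   static int poll(struct napi_struct *napi, int budget)
 *   {
 *           int work_done = process_work(napi, budget);
 *
 *           if (work_done < budget) {
 *                   napi_complete(napi);
 *                   reenable_irqs();
 *           }
 *           return work_done;
 *   }
 *
 * Interrupts stay masked for as long as poll() consumes its whole
 * budget, and the NAPI core keeps rescheduling it until it does not.
 */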
5376 static void tg3_napi_disable(struct tg3 *tp)
5378 int i;
5380 for (i = tp->irq_cnt - 1; i >= 0; i--)
5381 napi_disable(&tp->napi[i].napi);
5384 static void tg3_napi_enable(struct tg3 *tp)
5386 int i;
5388 for (i = 0; i < tp->irq_cnt; i++)
5389 napi_enable(&tp->napi[i].napi);
5392 static void tg3_napi_init(struct tg3 *tp)
5394 int i;
5396 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5397 for (i = 1; i < tp->irq_cnt; i++)
5398 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5401 static void tg3_napi_fini(struct tg3 *tp)
5403 int i;
5405 for (i = 0; i < tp->irq_cnt; i++)
5406 netif_napi_del(&tp->napi[i].napi);
5409 static inline void tg3_netif_stop(struct tg3 *tp)
5411 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5412 tg3_napi_disable(tp);
5413 netif_tx_disable(tp->dev);
5416 static inline void tg3_netif_start(struct tg3 *tp)
5418 /* NOTE: unconditional netif_tx_wake_all_queues is only
5419 * appropriate so long as all callers are assured to
5420 * have free tx slots (such as after tg3_init_hw)
5422 netif_tx_wake_all_queues(tp->dev);
5424 tg3_napi_enable(tp);
5425 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5426 tg3_enable_ints(tp);
5429 static void tg3_irq_quiesce(struct tg3 *tp)
5431 int i;
5433 BUG_ON(tp->irq_sync);
5435 tp->irq_sync = 1;
5436 smp_mb();
5438 for (i = 0; i < tp->irq_cnt; i++)
5439 synchronize_irq(tp->napi[i].irq_vec);
5442 /* Fully shut down all tg3 driver activity elsewhere in the system.
5443 * If irq_sync is non-zero, the IRQ handlers are synchronized as
5444 * well; most of the time this is only necessary when shutting
5445 * down the device.
5447 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5449 spin_lock_bh(&tp->lock);
5450 if (irq_sync)
5451 tg3_irq_quiesce(tp);
5454 static inline void tg3_full_unlock(struct tg3 *tp)
5456 spin_unlock_bh(&tp->lock);
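/* Typical usage of the two helpers above, as in tg3_reset_task()
 * below; irq_sync = 1 is passed when the hardware is about to be
 * reconfigured behind the interrupt handlers' backs:
 *
 *   tg3_full_lock(tp, 1);    (quiesce IRQs, then take tp->lock)
 *   ... halt and re-init the chip ...
 *   tg3_full_unlock(tp);
 */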
5459 /* One-shot MSI handler - Chip automatically disables interrupt
5460 * after sending MSI so driver doesn't have to do it.
5462 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5464 struct tg3_napi *tnapi = dev_id;
5465 struct tg3 *tp = tnapi->tp;
5467 prefetch(tnapi->hw_status);
5468 if (tnapi->rx_rcb)
5469 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5471 if (likely(!tg3_irq_sync(tp)))
5472 napi_schedule(&tnapi->napi);
5474 return IRQ_HANDLED;
5477 /* MSI ISR - No need to check for interrupt sharing and no need to
5478 * flush status block and interrupt mailbox. PCI ordering rules
5479 * guarantee that MSI will arrive after the status block.
5481 static irqreturn_t tg3_msi(int irq, void *dev_id)
5483 struct tg3_napi *tnapi = dev_id;
5484 struct tg3 *tp = tnapi->tp;
5486 prefetch(tnapi->hw_status);
5487 if (tnapi->rx_rcb)
5488 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5490 * Writing any value to intr-mbox-0 clears PCI INTA# and
5491 * chip-internal interrupt pending events.
5493 * Writing non-zero to intr-mbox-0 additionally tells the
5493 * NIC to stop sending us irqs, engaging "in-intr-handler"
5494 * event coalescing.
5496 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5497 if (likely(!tg3_irq_sync(tp)))
5498 napi_schedule(&tnapi->napi);
5500 return IRQ_RETVAL(1);
5503 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5505 struct tg3_napi *tnapi = dev_id;
5506 struct tg3 *tp = tnapi->tp;
5507 struct tg3_hw_status *sblk = tnapi->hw_status;
5508 unsigned int handled = 1;
5510 /* In INTx mode, it is possible for the interrupt to arrive at
5511 * the CPU before the status block write posted prior to the
5512 * interrupt becomes visible. Reading the PCI State register will
5513 * confirm whether the interrupt is ours and flush the status block.
5515 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5516 if (tg3_flag(tp, CHIP_RESETTING) ||
5517 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5518 handled = 0;
5519 goto out;
5524 * Writing any value to intr-mbox-0 clears PCI INTA# and
5525 * chip-internal interrupt pending events.
5526 * Writing non-zero to intr-mbox-0 additionally tells the
5527 * NIC to stop sending us irqs, engaging "in-intr-handler"
5528 * event coalescing.
5530 * Flush the mailbox to de-assert the IRQ immediately to prevent
5531 * spurious interrupts. The flush impacts performance but
5532 * excessive spurious interrupts can be worse in some cases.
5534 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5535 if (tg3_irq_sync(tp))
5536 goto out;
5537 sblk->status &= ~SD_STATUS_UPDATED;
5538 if (likely(tg3_has_work(tnapi))) {
5539 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5540 napi_schedule(&tnapi->napi);
5541 } else {
5542 /* No work, shared interrupt perhaps? re-enable
5543 * interrupts, and flush that PCI write
5545 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5546 0x00000000);
5548 out:
5549 return IRQ_RETVAL(handled);
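/* Summary of the interrupt mailbox protocol used above and below,
 * as described by the comments in this file:
 *
 *   tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x1);
 *           - clears PCI INTA# and masks further chip interrupts
 *   tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x0);
 *           - re-enables interrupts (any value clears INTA#; only a
 *             non-zero value also masks)
 */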
5552 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5554 struct tg3_napi *tnapi = dev_id;
5555 struct tg3 *tp = tnapi->tp;
5556 struct tg3_hw_status *sblk = tnapi->hw_status;
5557 unsigned int handled = 1;
5559 /* In INTx mode, it is possible for the interrupt to arrive at
5560 * the CPU before the status block write posted prior to the
5561 * interrupt becomes visible. Reading the PCI State register will
5562 * confirm whether the interrupt is ours and flush the status block.
5564 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5565 if (tg3_flag(tp, CHIP_RESETTING) ||
5566 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5567 handled = 0;
5568 goto out;
5573 * writing any value to intr-mbox-0 clears PCI INTA# and
5574 * chip-internal interrupt pending events.
5575 * writing non-zero to intr-mbox-0 additionally tells the
5576 * NIC to stop sending us irqs, engaging "in-intr-handler"
5577 * event coalescing.
5579 * Flush the mailbox to de-assert the IRQ immediately to prevent
5580 * spurious interrupts. The flush impacts performance but
5581 * excessive spurious interrupts can be worse in some cases.
5583 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5586 * In a shared interrupt configuration, sometimes other devices'
5587 * interrupts will scream. We record the current status tag here
5588 * so that the above check can report that the screaming interrupts
5589 * are unhandled. Eventually they will be silenced.
5591 tnapi->last_irq_tag = sblk->status_tag;
5593 if (tg3_irq_sync(tp))
5594 goto out;
5596 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5598 napi_schedule(&tnapi->napi);
5600 out:
5601 return IRQ_RETVAL(handled);
5604 /* ISR for interrupt test */
5605 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5607 struct tg3_napi *tnapi = dev_id;
5608 struct tg3 *tp = tnapi->tp;
5609 struct tg3_hw_status *sblk = tnapi->hw_status;
5611 if ((sblk->status & SD_STATUS_UPDATED) ||
5612 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5613 tg3_disable_ints(tp);
5614 return IRQ_RETVAL(1);
5616 return IRQ_RETVAL(0);
5619 static int tg3_init_hw(struct tg3 *, int);
5620 static int tg3_halt(struct tg3 *, int, int);
5622 /* Restart hardware after configuration changes, self-test, etc.
5623 * Invoked with tp->lock held.
5625 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5626 __releases(tp->lock)
5627 __acquires(tp->lock)
5629 int err;
5631 err = tg3_init_hw(tp, reset_phy);
5632 if (err) {
5633 netdev_err(tp->dev,
5634 "Failed to re-initialize device, aborting\n");
5635 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5636 tg3_full_unlock(tp);
5637 del_timer_sync(&tp->timer);
5638 tp->irq_sync = 0;
5639 tg3_napi_enable(tp);
5640 dev_close(tp->dev);
5641 tg3_full_lock(tp, 0);
5643 return err;
5646 #ifdef CONFIG_NET_POLL_CONTROLLER
5647 static void tg3_poll_controller(struct net_device *dev)
5649 int i;
5650 struct tg3 *tp = netdev_priv(dev);
5652 for (i = 0; i < tp->irq_cnt; i++)
5653 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5655 #endif
5657 static void tg3_reset_task(struct work_struct *work)
5659 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5660 int err;
5661 unsigned int restart_timer;
5663 tg3_full_lock(tp, 0);
5665 if (!netif_running(tp->dev)) {
5666 tg3_full_unlock(tp);
5667 return;
5670 tg3_full_unlock(tp);
5672 tg3_phy_stop(tp);
5674 tg3_netif_stop(tp);
5676 tg3_full_lock(tp, 1);
5678 restart_timer = tg3_flag(tp, RESTART_TIMER);
5679 tg3_flag_clear(tp, RESTART_TIMER);
5681 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5682 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5683 tp->write32_rx_mbox = tg3_write_flush_reg32;
5684 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5685 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5688 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5689 err = tg3_init_hw(tp, 1);
5690 if (err)
5691 goto out;
5693 tg3_netif_start(tp);
5695 if (restart_timer)
5696 mod_timer(&tp->timer, jiffies + 1);
5698 out:
5699 tg3_full_unlock(tp);
5701 if (!err)
5702 tg3_phy_start(tp);
5705 static void tg3_tx_timeout(struct net_device *dev)
5707 struct tg3 *tp = netdev_priv(dev);
5709 if (netif_msg_tx_err(tp)) {
5710 netdev_err(dev, "transmit timed out, resetting\n");
5711 tg3_dump_state(tp);
5714 schedule_work(&tp->reset_task);
5717 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5718 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5720 u32 base = (u32) mapping & 0xffffffff;
5722 return (base > 0xffffdcc0) && (base + len + 8 < base);
5725 /* Test for DMA addresses > 40-bit */
5726 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5727 int len)
5729 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5730 if (tg3_flag(tp, 40BIT_DMA_BUG))
5731 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5732 return 0;
5733 #else
5734 return 0;
5735 #endif
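/* Worked example for the 40-bit test above (values illustrative):
 * with mapping = 0xfffffff000 and len = 0x2000, mapping + len =
 * 0x10000001000, which exceeds DMA_BIT_MASK(40) = 0xffffffffff, so
 * the buffer straddles the 40-bit limit and the workaround path in
 * tigon3_dma_hwbug_workaround() must be taken.
 */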
5738 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5740 /* Work around 4GB and 40-bit hardware DMA bugs. */
5741 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5742 struct sk_buff *skb, u32 last_plus_one,
5743 u32 *start, u32 base_flags, u32 mss)
5745 struct tg3 *tp = tnapi->tp;
5746 struct sk_buff *new_skb;
5747 dma_addr_t new_addr = 0;
5748 u32 entry = *start;
5749 int i, ret = 0;
5751 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5752 new_skb = skb_copy(skb, GFP_ATOMIC);
5753 else {
5754 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5756 new_skb = skb_copy_expand(skb,
5757 skb_headroom(skb) + more_headroom,
5758 skb_tailroom(skb), GFP_ATOMIC);
5761 if (!new_skb) {
5762 ret = -1;
5763 } else {
5764 /* New SKB is guaranteed to be linear. */
5765 entry = *start;
5766 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5767 PCI_DMA_TODEVICE);
5768 /* Make sure the mapping succeeded */
5769 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5770 ret = -1;
5771 dev_kfree_skb(new_skb);
5772 new_skb = NULL;
5774 /* Make sure new skb does not cross any 4G boundaries.
5775 * Drop the packet if it does.
5777 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5778 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5779 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5780 PCI_DMA_TODEVICE);
5781 ret = -1;
5782 dev_kfree_skb(new_skb);
5783 new_skb = NULL;
5784 } else {
5785 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5786 base_flags, 1 | (mss << 1));
5787 *start = NEXT_TX(entry);
5791 /* Now clean up the sw ring entries. */
5792 i = 0;
5793 while (entry != last_plus_one) {
5794 int len;
5796 if (i == 0)
5797 len = skb_headlen(skb);
5798 else
5799 len = skb_shinfo(skb)->frags[i-1].size;
5801 pci_unmap_single(tp->pdev,
5802 dma_unmap_addr(&tnapi->tx_buffers[entry],
5803 mapping),
5804 len, PCI_DMA_TODEVICE);
5805 if (i == 0) {
5806 tnapi->tx_buffers[entry].skb = new_skb;
5807 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5808 new_addr);
5809 } else {
5810 tnapi->tx_buffers[entry].skb = NULL;
5812 entry = NEXT_TX(entry);
5813 i++;
5816 dev_kfree_skb(skb);
5818 return ret;
5821 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5822 dma_addr_t mapping, int len, u32 flags,
5823 u32 mss_and_is_end)
5825 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5826 int is_end = (mss_and_is_end & 0x1);
5827 u32 mss = (mss_and_is_end >> 1);
5828 u32 vlan_tag = 0;
5830 if (is_end)
5831 flags |= TXD_FLAG_END;
5832 if (flags & TXD_FLAG_VLAN) {
5833 vlan_tag = flags >> 16;
5834 flags &= 0xffff;
5836 vlan_tag |= (mss << TXD_MSS_SHIFT);
5838 txd->addr_hi = ((u64) mapping >> 32);
5839 txd->addr_lo = ((u64) mapping & 0xffffffff);
5840 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5841 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
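/* The mss_and_is_end argument packs two fields, as the callers'
 * "(i == last) | (mss << 1)" expressions show.  Worked example
 * (values illustrative): for an MSS of 1448 on the final descriptor,
 * mss_and_is_end = (1448 << 1) | 1 = 2897, which decodes above to
 * is_end = 1 and mss = 1448.
 */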
5844 /* hard_start_xmit for devices that don't have any bugs and
5845 * support TG3_FLAG_HW_TSO_2 and TG3_FLAG_HW_TSO_3 only.
5847 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5848 struct net_device *dev)
5850 struct tg3 *tp = netdev_priv(dev);
5851 u32 len, entry, base_flags, mss;
5852 dma_addr_t mapping;
5853 struct tg3_napi *tnapi;
5854 struct netdev_queue *txq;
5855 unsigned int i, last;
5857 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5858 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5859 if (tg3_flag(tp, ENABLE_TSS))
5860 tnapi++;
5862 /* We are running in BH disabled context with netif_tx_lock
5863 * and TX reclaim runs via tp->napi.poll inside of a software
5864 * interrupt. Furthermore, IRQ processing runs lockless so we have
5865 * no IRQ context deadlocks to worry about either. Rejoice!
5867 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5868 if (!netif_tx_queue_stopped(txq)) {
5869 netif_tx_stop_queue(txq);
5871 /* This is a hard error, log it. */
5872 netdev_err(dev,
5873 "BUG! Tx Ring full when queue awake!\n");
5875 return NETDEV_TX_BUSY;
5878 entry = tnapi->tx_prod;
5879 base_flags = 0;
5880 mss = skb_shinfo(skb)->gso_size;
5881 if (mss) {
5882 int tcp_opt_len, ip_tcp_len;
5883 u32 hdrlen;
5885 if (skb_header_cloned(skb) &&
5886 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5887 dev_kfree_skb(skb);
5888 goto out_unlock;
5891 if (skb_is_gso_v6(skb)) {
5892 hdrlen = skb_headlen(skb) - ETH_HLEN;
5893 } else {
5894 struct iphdr *iph = ip_hdr(skb);
5896 tcp_opt_len = tcp_optlen(skb);
5897 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5899 iph->check = 0;
5900 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5901 hdrlen = ip_tcp_len + tcp_opt_len;
5904 if (tg3_flag(tp, HW_TSO_3)) {
5905 mss |= (hdrlen & 0xc) << 12;
5906 if (hdrlen & 0x10)
5907 base_flags |= 0x00000010;
5908 base_flags |= (hdrlen & 0x3e0) << 5;
5909 } else
5910 mss |= hdrlen << 9;
5912 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5913 TXD_FLAG_CPU_POST_DMA);
5915 tcp_hdr(skb)->check = 0;
5917 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5918 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5921 if (vlan_tx_tag_present(skb))
5922 base_flags |= (TXD_FLAG_VLAN |
5923 (vlan_tx_tag_get(skb) << 16));
5925 len = skb_headlen(skb);
5927 /* Queue skb data, a.k.a. the main skb fragment. */
5928 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5929 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5930 dev_kfree_skb(skb);
5931 goto out_unlock;
5934 tnapi->tx_buffers[entry].skb = skb;
5935 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5937 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
5938 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5939 base_flags |= TXD_FLAG_JMB_PKT;
5941 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5942 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5944 entry = NEXT_TX(entry);
5946 /* Now loop through additional data fragments, and queue them. */
5947 if (skb_shinfo(skb)->nr_frags > 0) {
5948 last = skb_shinfo(skb)->nr_frags - 1;
5949 for (i = 0; i <= last; i++) {
5950 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5952 len = frag->size;
5953 mapping = pci_map_page(tp->pdev,
5954 frag->page,
5955 frag->page_offset,
5956 len, PCI_DMA_TODEVICE);
5957 if (pci_dma_mapping_error(tp->pdev, mapping))
5958 goto dma_error;
5960 tnapi->tx_buffers[entry].skb = NULL;
5961 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5962 mapping);
5964 tg3_set_txd(tnapi, entry, mapping, len,
5965 base_flags, (i == last) | (mss << 1));
5967 entry = NEXT_TX(entry);
5971 /* Packets are ready, update Tx producer idx locally and on card. */
5972 tw32_tx_mbox(tnapi->prodmbox, entry);
5974 tnapi->tx_prod = entry;
5975 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5976 netif_tx_stop_queue(txq);
5978 /* netif_tx_stop_queue() must be done before checking
5979 * the tx index in tg3_tx_avail() below, because in
5980 * tg3_tx(), we update tx index before checking for
5981 * netif_tx_queue_stopped().
5983 smp_mb();
5984 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5985 netif_tx_wake_queue(txq);
5988 out_unlock:
5989 mmiowb();
5991 return NETDEV_TX_OK;
5993 dma_error:
5994 last = i;
5995 entry = tnapi->tx_prod;
5996 tnapi->tx_buffers[entry].skb = NULL;
5997 pci_unmap_single(tp->pdev,
5998 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5999 skb_headlen(skb),
6000 PCI_DMA_TODEVICE);
6001 for (i = 0; i <= last; i++) {
6002 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6003 entry = NEXT_TX(entry);
6005 pci_unmap_page(tp->pdev,
6006 dma_unmap_addr(&tnapi->tx_buffers[entry],
6007 mapping),
6008 frag->size, PCI_DMA_TODEVICE);
6011 dev_kfree_skb(skb);
6012 return NETDEV_TX_OK;
6015 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
6016 struct net_device *);
6018 /* Use GSO to work around a rare TSO bug that may be triggered when the
6019 * TSO header is greater than 80 bytes.
6021 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6023 struct sk_buff *segs, *nskb;
6024 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6026 /* Estimate the number of fragments in the worst case */
6027 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6028 netif_stop_queue(tp->dev);
6030 /* netif_tx_stop_queue() must be done before checking
6031 * the tx index in tg3_tx_avail() below, because in
6032 * tg3_tx(), we update tx index before checking for
6033 * netif_tx_queue_stopped().
6035 smp_mb();
6036 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6037 return NETDEV_TX_BUSY;
6039 netif_wake_queue(tp->dev);
6042 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6043 if (IS_ERR(segs))
6044 goto tg3_tso_bug_end;
6046 do {
6047 nskb = segs;
6048 segs = segs->next;
6049 nskb->next = NULL;
6050 tg3_start_xmit_dma_bug(nskb, tp->dev);
6051 } while (segs);
6053 tg3_tso_bug_end:
6054 dev_kfree_skb(skb);
6056 return NETDEV_TX_OK;
6059 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6060 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6062 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
6063 struct net_device *dev)
6065 struct tg3 *tp = netdev_priv(dev);
6066 u32 len, entry, base_flags, mss;
6067 int would_hit_hwbug;
6068 dma_addr_t mapping;
6069 struct tg3_napi *tnapi;
6070 struct netdev_queue *txq;
6071 unsigned int i, last;
6073 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6074 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6075 if (tg3_flag(tp, ENABLE_TSS))
6076 tnapi++;
6078 /* We are running in BH disabled context with netif_tx_lock
6079 * and TX reclaim runs via tp->napi.poll inside of a software
6080 * interrupt. Furthermore, IRQ processing runs lockless so we have
6081 * no IRQ context deadlocks to worry about either. Rejoice!
6083 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6084 if (!netif_tx_queue_stopped(txq)) {
6085 netif_tx_stop_queue(txq);
6087 /* This is a hard error, log it. */
6088 netdev_err(dev,
6089 "BUG! Tx Ring full when queue awake!\n");
6091 return NETDEV_TX_BUSY;
6094 entry = tnapi->tx_prod;
6095 base_flags = 0;
6096 if (skb->ip_summed == CHECKSUM_PARTIAL)
6097 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6098
6099 mss = skb_shinfo(skb)->gso_size;
6100 if (mss) {
6101 struct iphdr *iph;
6102 u32 tcp_opt_len, hdr_len;
6103
6104 if (skb_header_cloned(skb) &&
6105 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6106 dev_kfree_skb(skb);
6107 goto out_unlock;
6108 }
6109
6110 iph = ip_hdr(skb);
6111 tcp_opt_len = tcp_optlen(skb);
6112
6113 if (skb_is_gso_v6(skb)) {
6114 hdr_len = skb_headlen(skb) - ETH_HLEN;
6115 } else {
6116 u32 ip_tcp_len;
6117
6118 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6119 hdr_len = ip_tcp_len + tcp_opt_len;
6120
6121 iph->check = 0;
6122 iph->tot_len = htons(mss + hdr_len);
6123 }
6124
6125 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6126 tg3_flag(tp, TSO_BUG))
6127 return tg3_tso_bug(tp, skb);
6128
6129 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6130 TXD_FLAG_CPU_POST_DMA);
6131
6132 if (tg3_flag(tp, HW_TSO_1) ||
6133 tg3_flag(tp, HW_TSO_2) ||
6134 tg3_flag(tp, HW_TSO_3)) {
6135 tcp_hdr(skb)->check = 0;
6136 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6137 } else
6138 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6139 iph->daddr, 0,
6140 IPPROTO_TCP,
6141 0);
6142
6143 if (tg3_flag(tp, HW_TSO_3)) {
6144 mss |= (hdr_len & 0xc) << 12;
6145 if (hdr_len & 0x10)
6146 base_flags |= 0x00000010;
6147 base_flags |= (hdr_len & 0x3e0) << 5;
6148 } else if (tg3_flag(tp, HW_TSO_2))
6149 mss |= hdr_len << 9;
6150 else if (tg3_flag(tp, HW_TSO_1) ||
6151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6152 if (tcp_opt_len || iph->ihl > 5) {
6153 int tsflags;
6154
6155 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6156 mss |= (tsflags << 11);
6157 }
6158 } else {
6159 if (tcp_opt_len || iph->ihl > 5) {
6160 int tsflags;
6161
6162 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6163 base_flags |= tsflags << 12;
6164 }
6165 }
6166 }
6167
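/* Note (editorial, not part of the original driver source): on HW_TSO_2/3
 * parts the header length rides along in the descriptor rather than being
 * re-derived by hardware; e.g. HW_TSO_2 packs it above the MSS value
 * (mss |= hdr_len << 9), while HW_TSO_3 splits it between the upper mss
 * bits and base_flags as encoded above. Older parts instead pass the
 * IP/TCP option length via the tsflags field.
 */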
6168 if (vlan_tx_tag_present(skb))
6169 base_flags |= (TXD_FLAG_VLAN |
6170 (vlan_tx_tag_get(skb) << 16));
6171
6172 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6173 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6174 base_flags |= TXD_FLAG_JMB_PKT;
6175
6176 len = skb_headlen(skb);
6177
6178 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6179 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6180 dev_kfree_skb(skb);
6181 goto out_unlock;
6182 }
6183
6184 tnapi->tx_buffers[entry].skb = skb;
6185 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6186
6187 would_hit_hwbug = 0;
6188
6189 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6190 would_hit_hwbug = 1;
6191
6192 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6193 tg3_4g_overflow_test(mapping, len))
6194 would_hit_hwbug = 1;
6195
6196 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6197 tg3_40bit_overflow_test(tp, mapping, len))
6198 would_hit_hwbug = 1;
6199
6200 if (tg3_flag(tp, 5701_DMA_BUG))
6201 would_hit_hwbug = 1;
6202
6203 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6204 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6205
6206 entry = NEXT_TX(entry);
6207
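/* Note (editorial, not part of the original driver source): as a sketch,
 * tg3_4g_overflow_test(mapping, len) is believed to flag a buffer whose
 * low 32 DMA address bits would wrap across a 4 GB boundary, roughly:
 *
 *	u32 base = (u32)mapping;
 *	bool bad = base + len + 8 < base;	// wraps past 0xffffffff
 *
 * Chips with the 4G_DMA_BNDRY_BUG corrupt transfers that cross that
 * boundary, hence the would_hit_hwbug reroute below.
 */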
6208 /* Now loop through additional data fragments, and queue them. */
6209 if (skb_shinfo(skb)->nr_frags > 0) {
6210 last = skb_shinfo(skb)->nr_frags - 1;
6211 for (i = 0; i <= last; i++) {
6212 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6213
6214 len = frag->size;
6215 mapping = pci_map_page(tp->pdev,
6216 frag->page,
6217 frag->page_offset,
6218 len, PCI_DMA_TODEVICE);
6219
6220 tnapi->tx_buffers[entry].skb = NULL;
6221 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6222 mapping);
6223 if (pci_dma_mapping_error(tp->pdev, mapping))
6224 goto dma_error;
6225
6226 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6227 len <= 8)
6228 would_hit_hwbug = 1;
6229
6230 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6231 tg3_4g_overflow_test(mapping, len))
6232 would_hit_hwbug = 1;
6233
6234 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6235 tg3_40bit_overflow_test(tp, mapping, len))
6236 would_hit_hwbug = 1;
6237
6238 if (tg3_flag(tp, HW_TSO_1) ||
6239 tg3_flag(tp, HW_TSO_2) ||
6240 tg3_flag(tp, HW_TSO_3))
6241 tg3_set_txd(tnapi, entry, mapping, len,
6242 base_flags, (i == last)|(mss << 1));
6243 else
6244 tg3_set_txd(tnapi, entry, mapping, len,
6245 base_flags, (i == last));
6246
6247 entry = NEXT_TX(entry);
6248 }
6249 }
6250
6251 if (would_hit_hwbug) {
6252 u32 last_plus_one = entry;
6253 u32 start;
6254
6255 start = entry - 1 - skb_shinfo(skb)->nr_frags;
6256 start &= (TG3_TX_RING_SIZE - 1);
6257
6258 /* If the workaround fails due to memory/mapping
6259 * failure, silently drop this packet.
6260 */
6261 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
6262 &start, base_flags, mss))
6263 goto out_unlock;
6264
6265 entry = start;
6266 }
6267
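/* Note (editorial, not part of the original driver source):
 * tigon3_dma_hwbug_workaround() is believed to allocate a fresh, linear,
 * well-aligned copy of the packet, remap it, and rewrite the descriptors
 * from *start onward, which is why a failure inside it silently drops
 * the packet as the comment above says.
 */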
6268 /* Packets are ready, update Tx producer idx local and on card. */
6269 tw32_tx_mbox(tnapi->prodmbox, entry);
6270
6271 tnapi->tx_prod = entry;
6272 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6273 netif_tx_stop_queue(txq);
6274
6275 /* netif_tx_stop_queue() must be done before checking
6276 * tx index in tg3_tx_avail() below, because in
6277 * tg3_tx(), we update tx index before checking for
6278 * netif_tx_queue_stopped().
6279 */
6280 smp_mb();
6281 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6282 netif_tx_wake_queue(txq);
6283 }
6284
6285 out_unlock:
6286 mmiowb();
6287
6288 return NETDEV_TX_OK;
6289
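/* Note (editorial, not part of the original driver source): the smp_mb()
 * in the queue-stop path above is believed to pair with a matching
 * barrier in the tg3_tx() completion path; without the pairing, the
 * producer could miss the consumer's progress and leave the queue
 * stopped forever, or wake it while it is still full.
 */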
6290 dma_error:
6291 last = i;
6292 entry = tnapi->tx_prod;
6293 tnapi->tx_buffers[entry].skb = NULL;
6294 pci_unmap_single(tp->pdev,
6295 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
6296 skb_headlen(skb),
6297 PCI_DMA_TODEVICE);
6298 for (i = 0; i <= last; i++) {
6299 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6300 entry = NEXT_TX(entry);
6301
6302 pci_unmap_page(tp->pdev,
6303 dma_unmap_addr(&tnapi->tx_buffers[entry],
6304 mapping),
6305 frag->size, PCI_DMA_TODEVICE);
6306 }
6307
6308 dev_kfree_skb(skb);
6309 return NETDEV_TX_OK;
6310 }
6311
6312 static void tg3_set_loopback(struct net_device *dev, u32 features)
6313 {
6314 struct tg3 *tp = netdev_priv(dev);
6315
6316 if (features & NETIF_F_LOOPBACK) {
6317 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6318 return;
6319
6320 /*
6321 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6322 * loopback mode if Half-Duplex mode was negotiated earlier.
6323 */
6324 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6325
6326 /* Enable internal MAC loopback mode */
6327 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6328 spin_lock_bh(&tp->lock);
6329 tw32(MAC_MODE, tp->mac_mode);
6330 netif_carrier_on(tp->dev);
6331 spin_unlock_bh(&tp->lock);
6332 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6333 } else {
6334 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6335 return;
6336
6337 /* Disable internal MAC loopback mode */
6338 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6339 spin_lock_bh(&tp->lock);
6340 tw32(MAC_MODE, tp->mac_mode);
6341 /* Force link status check */
6342 tg3_setup_phy(tp, 1);
6343 spin_unlock_bh(&tp->lock);
6344 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6345 }
6346 }
6347
6348 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6349 {
6350 struct tg3 *tp = netdev_priv(dev);
6351
6352 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6353 features &= ~NETIF_F_ALL_TSO;
6354
6355 return features;
6356 }
6357
6358 static int tg3_set_features(struct net_device *dev, u32 features)
6359 {
6360 u32 changed = dev->features ^ features;
6361
6362 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6363 tg3_set_loopback(dev, features);
6364
6365 return 0;
6366 }
6367
6368 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6369 int new_mtu)
6370 {
6371 dev->mtu = new_mtu;
6372
6373 if (new_mtu > ETH_DATA_LEN) {
6374 if (tg3_flag(tp, 5780_CLASS)) {
6375 netdev_update_features(dev);
6376 tg3_flag_clear(tp, TSO_CAPABLE);
6377 } else {
6378 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6379 }
6380 } else {
6381 if (tg3_flag(tp, 5780_CLASS)) {
6382 tg3_flag_set(tp, TSO_CAPABLE);
6383 netdev_update_features(dev);
6384 }
6385 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6386 }
6387 }
6388
6389 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6390 {
6391 struct tg3 *tp = netdev_priv(dev);
6392 int err;
6393
6394 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6395 return -EINVAL;
6396
6397 if (!netif_running(dev)) {
6398 /* We'll just catch it later when the
6399 * device is up'd.
6400 */
6401 tg3_set_mtu(dev, tp, new_mtu);
6402 return 0;
6403 }
6404
6405 tg3_phy_stop(tp);
6406
6407 tg3_netif_stop(tp);
6408
6409 tg3_full_lock(tp, 1);
6410
6411 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6412
6413 tg3_set_mtu(dev, tp, new_mtu);
6414
6415 err = tg3_restart_hw(tp, 0);
6416
6417 if (!err)
6418 tg3_netif_start(tp);
6419
6420 tg3_full_unlock(tp);
6421
6422 if (!err)
6423 tg3_phy_start(tp);
6424
6425 return err;
6426 }
6427
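/* Note (editorial, not part of the original driver source): an MTU change
 * on a running interface takes the full halt/restart path above because
 * the standard-vs-jumbo ring selection and the rx buffer DMA sizes are
 * derived from dev->mtu when the rings are (re)initialized.
 */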
6428 static void tg3_rx_prodring_free(struct tg3 *tp,
6429 struct tg3_rx_prodring_set *tpr)
6430 {
6431 int i;
6432
6433 if (tpr != &tp->napi[0].prodring) {
6434 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6435 i = (i + 1) & tp->rx_std_ring_mask)
6436 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6437 tp->rx_pkt_map_sz);
6438
6439 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6440 for (i = tpr->rx_jmb_cons_idx;
6441 i != tpr->rx_jmb_prod_idx;
6442 i = (i + 1) & tp->rx_jmb_ring_mask) {
6443 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6444 TG3_RX_JMB_MAP_SZ);
6445 }
6446 }
6447
6448 return;
6449 }
6450
6451 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6452 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6453 tp->rx_pkt_map_sz);
6454
6455 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6456 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6457 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6458 TG3_RX_JMB_MAP_SZ);
6459 }
6460 }
6461
6462 /* Initialize rx rings for packet processing.
6463 *
6464 * The chip has been shut down and the driver detached from
6465 * the networking, so no interrupts or new tx packets will
6466 * end up in the driver.  tp->{tx,}lock are held and thus
6467 * we may not sleep.
6468 */
6469 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6470 struct tg3_rx_prodring_set *tpr)
6471 {
6472 u32 i, rx_pkt_dma_sz;
6473
6474 tpr->rx_std_cons_idx = 0;
6475 tpr->rx_std_prod_idx = 0;
6476 tpr->rx_jmb_cons_idx = 0;
6477 tpr->rx_jmb_prod_idx = 0;
6479 if (tpr != &tp->napi[0].prodring) {
6480 memset(&tpr->rx_std_buffers[0], 0,
6481 TG3_RX_STD_BUFF_RING_SIZE(tp));
6482 if (tpr->rx_jmb_buffers)
6483 memset(&tpr->rx_jmb_buffers[0], 0,
6484 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6485 goto done;
6488 /* Zero out all descriptors. */
6489 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6491 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6492 if (tg3_flag(tp, 5780_CLASS) &&
6493 tp->dev->mtu > ETH_DATA_LEN)
6494 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6495 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6496
6497 /* Initialize invariants of the rings, we only set this
6498 * stuff once.  This works because the card does not
6499 * write into the rx buffer posting rings.
6500 */
6501 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6502 struct tg3_rx_buffer_desc *rxd;
6504 rxd = &tpr->rx_std[i];
6505 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6506 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6507 rxd->opaque = (RXD_OPAQUE_RING_STD |
6508 (i << RXD_OPAQUE_INDEX_SHIFT));
6511 /* Now allocate fresh SKBs for each rx ring. */
6512 for (i = 0; i < tp->rx_pending; i++) {
6513 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6514 netdev_warn(tp->dev,
6515 "Using a smaller RX standard ring. Only "
6516 "%d out of %d buffers were allocated "
6517 "successfully\n", i, tp->rx_pending);
6518 if (i == 0)
6519 goto initfail;
6520 tp->rx_pending = i;
6521 break;
6525 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6526 goto done;
6528 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6530 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6531 goto done;
6533 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6534 struct tg3_rx_buffer_desc *rxd;
6536 rxd = &tpr->rx_jmb[i].std;
6537 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6538 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6539 RXD_FLAG_JUMBO;
6540 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6541 (i << RXD_OPAQUE_INDEX_SHIFT));
6544 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6545 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6546 netdev_warn(tp->dev,
6547 "Using a smaller RX jumbo ring. Only %d "
6548 "out of %d buffers were allocated "
6549 "successfully\n", i, tp->rx_jumbo_pending);
6550 if (i == 0)
6551 goto initfail;
6552 tp->rx_jumbo_pending = i;
6553 break;
6554 }
6555 }
6556
6557 done:
6558 return 0;
6559
6560 initfail:
6561 tg3_rx_prodring_free(tp, tpr);
6562 return -ENOMEM;
6563 }
6564
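/* Note (editorial, not part of the original driver source):
 * tg3_rx_prodring_alloc() degrades gracefully: if the atomic skb
 * allocations run dry it shrinks rx_pending/rx_jumbo_pending to however
 * many buffers were actually posted, and only fails with -ENOMEM when
 * even the first buffer cannot be allocated.
 */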
6565 static void tg3_rx_prodring_fini(struct tg3 *tp,
6566 struct tg3_rx_prodring_set *tpr)
6568 kfree(tpr->rx_std_buffers);
6569 tpr->rx_std_buffers = NULL;
6570 kfree(tpr->rx_jmb_buffers);
6571 tpr->rx_jmb_buffers = NULL;
6572 if (tpr->rx_std) {
6573 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6574 tpr->rx_std, tpr->rx_std_mapping);
6575 tpr->rx_std = NULL;
6577 if (tpr->rx_jmb) {
6578 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6579 tpr->rx_jmb, tpr->rx_jmb_mapping);
6580 tpr->rx_jmb = NULL;
6584 static int tg3_rx_prodring_init(struct tg3 *tp,
6585 struct tg3_rx_prodring_set *tpr)
6587 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6588 GFP_KERNEL);
6589 if (!tpr->rx_std_buffers)
6590 return -ENOMEM;
6592 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6593 TG3_RX_STD_RING_BYTES(tp),
6594 &tpr->rx_std_mapping,
6595 GFP_KERNEL);
6596 if (!tpr->rx_std)
6597 goto err_out;
6599 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6600 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6601 GFP_KERNEL);
6602 if (!tpr->rx_jmb_buffers)
6603 goto err_out;
6605 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6606 TG3_RX_JMB_RING_BYTES(tp),
6607 &tpr->rx_jmb_mapping,
6608 GFP_KERNEL);
6609 if (!tpr->rx_jmb)
6610 goto err_out;
6613 return 0;
6615 err_out:
6616 tg3_rx_prodring_fini(tp, tpr);
6617 return -ENOMEM;
6618 }
6619
6620 /* Free up pending packets in all rx/tx rings.
6621 *
6622 * The chip has been shut down and the driver detached from
6623 * the networking, so no interrupts or new tx packets will
6624 * end up in the driver.  tp->{tx,}lock is not held and we are not
6625 * in an interrupt context and thus may sleep.
6626 */
6627 static void tg3_free_rings(struct tg3 *tp)
6628 {
6629 int i, j;
6630
6631 for (j = 0; j < tp->irq_cnt; j++) {
6632 struct tg3_napi *tnapi = &tp->napi[j];
6634 tg3_rx_prodring_free(tp, &tnapi->prodring);
6636 if (!tnapi->tx_buffers)
6637 continue;
6639 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6640 struct ring_info *txp;
6641 struct sk_buff *skb;
6642 unsigned int k;
6644 txp = &tnapi->tx_buffers[i];
6645 skb = txp->skb;
6647 if (skb == NULL) {
6648 i++;
6649 continue;
6652 pci_unmap_single(tp->pdev,
6653 dma_unmap_addr(txp, mapping),
6654 skb_headlen(skb),
6655 PCI_DMA_TODEVICE);
6656 txp->skb = NULL;
6658 i++;
6660 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6661 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6662 pci_unmap_page(tp->pdev,
6663 dma_unmap_addr(txp, mapping),
6664 skb_shinfo(skb)->frags[k].size,
6665 PCI_DMA_TODEVICE);
6666 i++;
6667 }
6668
6669 dev_kfree_skb_any(skb);
6670 }
6671 }
6672 }
6673
6674 /* Initialize tx/rx rings for packet processing.
6675 *
6676 * The chip has been shut down and the driver detached from
6677 * the networking, so no interrupts or new tx packets will
6678 * end up in the driver.  tp->{tx,}lock are held and thus
6679 * we may not sleep.
6680 */
6681 static int tg3_init_rings(struct tg3 *tp)
6682 {
6683 int i;
6684
6685 /* Free up all the SKBs. */
6686 tg3_free_rings(tp);
6688 for (i = 0; i < tp->irq_cnt; i++) {
6689 struct tg3_napi *tnapi = &tp->napi[i];
6691 tnapi->last_tag = 0;
6692 tnapi->last_irq_tag = 0;
6693 tnapi->hw_status->status = 0;
6694 tnapi->hw_status->status_tag = 0;
6695 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6697 tnapi->tx_prod = 0;
6698 tnapi->tx_cons = 0;
6699 if (tnapi->tx_ring)
6700 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6702 tnapi->rx_rcb_ptr = 0;
6703 if (tnapi->rx_rcb)
6704 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6706 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6707 tg3_free_rings(tp);
6708 return -ENOMEM;
6712 return 0;
6713 }
6714
6715 /*
6716 * Must not be invoked with interrupt sources disabled and
6717 * the hardware shut down.
6718 */
6719 static void tg3_free_consistent(struct tg3 *tp)
6720 {
6721 int i;
6722
6723 for (i = 0; i < tp->irq_cnt; i++) {
6724 struct tg3_napi *tnapi = &tp->napi[i];
6726 if (tnapi->tx_ring) {
6727 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6728 tnapi->tx_ring, tnapi->tx_desc_mapping);
6729 tnapi->tx_ring = NULL;
6732 kfree(tnapi->tx_buffers);
6733 tnapi->tx_buffers = NULL;
6735 if (tnapi->rx_rcb) {
6736 dma_free_coherent(&tp->pdev->dev,
6737 TG3_RX_RCB_RING_BYTES(tp),
6738 tnapi->rx_rcb,
6739 tnapi->rx_rcb_mapping);
6740 tnapi->rx_rcb = NULL;
6743 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6745 if (tnapi->hw_status) {
6746 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6747 tnapi->hw_status,
6748 tnapi->status_mapping);
6749 tnapi->hw_status = NULL;
6753 if (tp->hw_stats) {
6754 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6755 tp->hw_stats, tp->stats_mapping);
6756 tp->hw_stats = NULL;
6757 }
6758 }
6759
6760 /*
6761 * Must not be invoked with interrupt sources disabled and
6762 * the hardware shut down.  Can sleep.
6763 */
6764 static int tg3_alloc_consistent(struct tg3 *tp)
6765 {
6766 int i;
6767
6768 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6769 sizeof(struct tg3_hw_stats),
6770 &tp->stats_mapping,
6771 GFP_KERNEL);
6772 if (!tp->hw_stats)
6773 goto err_out;
6775 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6777 for (i = 0; i < tp->irq_cnt; i++) {
6778 struct tg3_napi *tnapi = &tp->napi[i];
6779 struct tg3_hw_status *sblk;
6781 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6782 TG3_HW_STATUS_SIZE,
6783 &tnapi->status_mapping,
6784 GFP_KERNEL);
6785 if (!tnapi->hw_status)
6786 goto err_out;
6788 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6789 sblk = tnapi->hw_status;
6791 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6792 goto err_out;
6793
6794 /* If multivector TSS is enabled, vector 0 does not handle
6795 * tx interrupts.  Don't allocate any resources for it.
6796 */
6797 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6798 (i && tg3_flag(tp, ENABLE_TSS))) {
6799 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6800 TG3_TX_RING_SIZE,
6801 GFP_KERNEL);
6802 if (!tnapi->tx_buffers)
6803 goto err_out;
6805 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6806 TG3_TX_RING_BYTES,
6807 &tnapi->tx_desc_mapping,
6808 GFP_KERNEL);
6809 if (!tnapi->tx_ring)
6810 goto err_out;
6811 }
6812
6813 /*
6814 * When RSS is enabled, the status block format changes
6815 * slightly.  The "rx_jumbo_consumer", "reserved",
6816 * and "rx_mini_consumer" members get mapped to the
6817 * other three rx return ring producer indexes.
6818 */
6819 switch (i) {
6820 default:
6821 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6822 break;
6823 case 2:
6824 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6825 break;
6826 case 3:
6827 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6828 break;
6829 case 4:
6830 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6831 break;
6832 }
6833
6834 /*
6835 * If multivector RSS is enabled, vector 0 does not handle
6836 * rx or tx interrupts.  Don't allocate any resources for it.
6837 */
6838 if (!i && tg3_flag(tp, ENABLE_RSS))
6839 continue;
6841 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6842 TG3_RX_RCB_RING_BYTES(tp),
6843 &tnapi->rx_rcb_mapping,
6844 GFP_KERNEL);
6845 if (!tnapi->rx_rcb)
6846 goto err_out;
6848 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6851 return 0;
6853 err_out:
6854 tg3_free_consistent(tp);
6855 return -ENOMEM;
6856 }
6857
6858 #define MAX_WAIT_CNT 1000
6859
6860 /* To stop a block, clear the enable bit and poll till it
6861 * clears.  tp->lock is held.
6862 */
6863 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6864 {
6865 unsigned int i;
6866 u32 val;
6867
6868 if (tg3_flag(tp, 5705_PLUS)) {
6869 switch (ofs) {
6870 case RCVLSC_MODE:
6871 case DMAC_MODE:
6872 case MBFREE_MODE:
6873 case BUFMGR_MODE:
6874 case MEMARB_MODE:
6875 /* We can't enable/disable these bits of the
6876 * 5705/5750, just say success.
6877 */
6878 return 0;
6879
6880 default:
6881 break;
6885 val = tr32(ofs);
6886 val &= ~enable_bit;
6887 tw32_f(ofs, val);
6889 for (i = 0; i < MAX_WAIT_CNT; i++) {
6890 udelay(100);
6891 val = tr32(ofs);
6892 if ((val & enable_bit) == 0)
6893 break;
6894 }
6895
6896 if (i == MAX_WAIT_CNT && !silent) {
6897 dev_err(&tp->pdev->dev,
6898 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6899 ofs, enable_bit);
6900 return -ENODEV;
6901 }
6902
6903 return 0;
6904 }
6905
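/* Note (editorial, not part of the original driver source): with
 * MAX_WAIT_CNT at 1000 and udelay(100) per iteration, tg3_stop_block()
 * polls for up to roughly 100 ms before reporting -ENODEV.
 */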
6906 /* tp->lock is held. */
6907 static int tg3_abort_hw(struct tg3 *tp, int silent)
6909 int i, err;
6911 tg3_disable_ints(tp);
6913 tp->rx_mode &= ~RX_MODE_ENABLE;
6914 tw32_f(MAC_RX_MODE, tp->rx_mode);
6915 udelay(10);
6917 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6918 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6919 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6920 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6921 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6922 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6924 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6925 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6926 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6927 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6928 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6929 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6930 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6932 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6933 tw32_f(MAC_MODE, tp->mac_mode);
6934 udelay(40);
6936 tp->tx_mode &= ~TX_MODE_ENABLE;
6937 tw32_f(MAC_TX_MODE, tp->tx_mode);
6939 for (i = 0; i < MAX_WAIT_CNT; i++) {
6940 udelay(100);
6941 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6942 break;
6944 if (i >= MAX_WAIT_CNT) {
6945 dev_err(&tp->pdev->dev,
6946 "%s timed out, TX_MODE_ENABLE will not clear "
6947 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6948 err |= -ENODEV;
6951 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6952 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6953 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6955 tw32(FTQ_RESET, 0xffffffff);
6956 tw32(FTQ_RESET, 0x00000000);
6958 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6959 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6961 for (i = 0; i < tp->irq_cnt; i++) {
6962 struct tg3_napi *tnapi = &tp->napi[i];
6963 if (tnapi->hw_status)
6964 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6966 if (tp->hw_stats)
6967 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6969 return err;
6972 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6974 int i;
6975 u32 apedata;
6977 /* NCSI does not support APE events */
6978 if (tg3_flag(tp, APE_HAS_NCSI))
6979 return;
6981 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6982 if (apedata != APE_SEG_SIG_MAGIC)
6983 return;
6985 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6986 if (!(apedata & APE_FW_STATUS_READY))
6987 return;
6989 /* Wait for up to 1 millisecond for APE to service previous event. */
6990 for (i = 0; i < 10; i++) {
6991 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6992 return;
6994 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6996 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6997 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6998 event | APE_EVENT_STATUS_EVENT_PENDING);
7000 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
7002 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7003 break;
7005 udelay(100);
7006 }
7007
7008 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7009 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
7010 }
7011
7012 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
7013 {
7014 u32 event;
7015 u32 apedata;
7016
7017 if (!tg3_flag(tp, ENABLE_APE))
7018 return;
7020 switch (kind) {
7021 case RESET_KIND_INIT:
7022 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
7023 APE_HOST_SEG_SIG_MAGIC);
7024 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
7025 APE_HOST_SEG_LEN_MAGIC);
7026 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
7027 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
7028 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
7029 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
7030 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
7031 APE_HOST_BEHAV_NO_PHYLOCK);
7032 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
7033 TG3_APE_HOST_DRVR_STATE_START);
7035 event = APE_EVENT_STATUS_STATE_START;
7036 break;
7037 case RESET_KIND_SHUTDOWN:
7038 /* With the interface we are currently using,
7039 * APE does not track driver state.  Wiping
7040 * out the HOST SEGMENT SIGNATURE forces
7041 * the APE to assume OS absent status.
7042 */
7043 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7044
7045 if (device_may_wakeup(&tp->pdev->dev) &&
7046 tg3_flag(tp, WOL_ENABLE)) {
7047 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7048 TG3_APE_HOST_WOL_SPEED_AUTO);
7049 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7050 } else
7051 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7053 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7055 event = APE_EVENT_STATUS_STATE_UNLOAD;
7056 break;
7057 case RESET_KIND_SUSPEND:
7058 event = APE_EVENT_STATUS_STATE_SUSPEND;
7059 break;
7060 default:
7061 return;
7064 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7066 tg3_ape_send_event(tp, event);
7069 /* tp->lock is held. */
7070 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7072 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7073 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7075 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7076 switch (kind) {
7077 case RESET_KIND_INIT:
7078 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7079 DRV_STATE_START);
7080 break;
7082 case RESET_KIND_SHUTDOWN:
7083 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7084 DRV_STATE_UNLOAD);
7085 break;
7087 case RESET_KIND_SUSPEND:
7088 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7089 DRV_STATE_SUSPEND);
7090 break;
7092 default:
7093 break;
7097 if (kind == RESET_KIND_INIT ||
7098 kind == RESET_KIND_SUSPEND)
7099 tg3_ape_driver_state_change(tp, kind);
7102 /* tp->lock is held. */
7103 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7105 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7106 switch (kind) {
7107 case RESET_KIND_INIT:
7108 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7109 DRV_STATE_START_DONE);
7110 break;
7112 case RESET_KIND_SHUTDOWN:
7113 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7114 DRV_STATE_UNLOAD_DONE);
7115 break;
7117 default:
7118 break;
7122 if (kind == RESET_KIND_SHUTDOWN)
7123 tg3_ape_driver_state_change(tp, kind);
7126 /* tp->lock is held. */
7127 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7129 if (tg3_flag(tp, ENABLE_ASF)) {
7130 switch (kind) {
7131 case RESET_KIND_INIT:
7132 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7133 DRV_STATE_START);
7134 break;
7136 case RESET_KIND_SHUTDOWN:
7137 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7138 DRV_STATE_UNLOAD);
7139 break;
7141 case RESET_KIND_SUSPEND:
7142 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7143 DRV_STATE_SUSPEND);
7144 break;
7146 default:
7147 break;
7152 static int tg3_poll_fw(struct tg3 *tp)
7154 int i;
7155 u32 val;
7157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7158 /* Wait up to 20ms for init done. */
7159 for (i = 0; i < 200; i++) {
7160 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7161 return 0;
7162 udelay(100);
7164 return -ENODEV;
7167 /* Wait for firmware initialization to complete. */
7168 for (i = 0; i < 100000; i++) {
7169 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7170 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7171 break;
7172 udelay(10);
7173 }
7174
7175 /* Chip might not be fitted with firmware.  Some Sun onboard
7176 * parts are configured like that.  So don't signal the timeout
7177 * of the above loop as an error, but do report the lack of
7178 * running firmware once.
7179 */
7180 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7181 tg3_flag_set(tp, NO_FWARE_REPORTED);
7182
7183 netdev_info(tp->dev, "No firmware running\n");
7184 }
7185
7186 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7187 /* The 57765 A0 needs a little more
7188 * time to do some important work.
7189 */
7190 mdelay(10);
7191 }
7192
7193 return 0;
7194 }
7195
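/* Note (editorial, not part of the original driver source): the mailbox
 * handshake polled above works like this: tg3_write_sig_pre_reset()
 * deposits NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in SRAM, and the bootcode
 * writes back its one's complement (~MAGIC1) when initialization is
 * done; 100000 iterations of udelay(10) bound the wait at about 1 s.
 */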
7196 /* Save PCI command register before chip reset */
7197 static void tg3_save_pci_state(struct tg3 *tp)
7198 {
7199 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7200 }
7201
7202 /* Restore PCI state after chip reset */
7203 static void tg3_restore_pci_state(struct tg3 *tp)
7204 {
7205 u32 val;
7206
7207 /* Re-enable indirect register accesses. */
7208 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7209 tp->misc_host_ctrl);
7211 /* Set MAX PCI retry to zero. */
7212 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7213 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7214 tg3_flag(tp, PCIX_MODE))
7215 val |= PCISTATE_RETRY_SAME_DMA;
7216 /* Allow reads and writes to the APE register and memory space. */
7217 if (tg3_flag(tp, ENABLE_APE))
7218 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7219 PCISTATE_ALLOW_APE_SHMEM_WR |
7220 PCISTATE_ALLOW_APE_PSPACE_WR;
7221 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7223 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7225 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7226 if (tg3_flag(tp, PCI_EXPRESS))
7227 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7228 else {
7229 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7230 tp->pci_cacheline_sz);
7231 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7232 tp->pci_lat_timer);
7236 /* Make sure PCI-X relaxed ordering bit is clear. */
7237 if (tg3_flag(tp, PCIX_MODE)) {
7238 u16 pcix_cmd;
7240 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7241 &pcix_cmd);
7242 pcix_cmd &= ~PCI_X_CMD_ERO;
7243 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7244 pcix_cmd);
7245 }
7246
7247 if (tg3_flag(tp, 5780_CLASS)) {
7248
7249 /* Chip reset on 5780 will reset MSI enable bit,
7250 * so need to restore it.
7251 */
7252 if (tg3_flag(tp, USING_MSI)) {
7253 u16 ctrl;
7254
7255 pci_read_config_word(tp->pdev,
7256 tp->msi_cap + PCI_MSI_FLAGS,
7257 &ctrl);
7258 pci_write_config_word(tp->pdev,
7259 tp->msi_cap + PCI_MSI_FLAGS,
7260 ctrl | PCI_MSI_FLAGS_ENABLE);
7261 val = tr32(MSGINT_MODE);
7262 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7263 }
7264 }
7265 }
7266
7267 static void tg3_stop_fw(struct tg3 *);
7268
7269 /* tp->lock is held. */
7270 static int tg3_chip_reset(struct tg3 *tp)
7272 u32 val;
7273 void (*write_op)(struct tg3 *, u32, u32);
7274 int i, err;
7276 tg3_nvram_lock(tp);
7278 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7279
7280 /* No matching tg3_nvram_unlock() after this because
7281 * chip reset below will undo the nvram lock.
7282 */
7283 tp->nvram_lock_cnt = 0;
7284
7285 /* GRC_MISC_CFG core clock reset will clear the memory
7286 * enable bit in PCI register 4 and the MSI enable bit
7287 * on some chips, so we save relevant registers here.
7288 */
7289 tg3_save_pci_state(tp);
7290
7291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7292 tg3_flag(tp, 5755_PLUS))
7293 tw32(GRC_FASTBOOT_PC, 0);
7294
7295 /*
7296 * We must avoid the readl() that normally takes place.
7297 * It locks machines, causes machine checks, and other
7298 * fun things.  So, temporarily disable the 5701
7299 * hardware workaround, while we do the reset.
7300 */
7301 write_op = tp->write32;
7302 if (write_op == tg3_write_flush_reg32)
7303 tp->write32 = tg3_write32;
7304
7305 /* Prevent the irq handler from reading or writing PCI registers
7306 * during chip reset when the memory enable bit in the PCI command
7307 * register may be cleared.  The chip does not generate interrupt
7308 * at this time, but the irq handler may still be called due to irq
7309 * sharing or irqpoll.
7310 */
7311 tg3_flag_set(tp, CHIP_RESETTING);
7312 for (i = 0; i < tp->irq_cnt; i++) {
7313 struct tg3_napi *tnapi = &tp->napi[i];
7314 if (tnapi->hw_status) {
7315 tnapi->hw_status->status = 0;
7316 tnapi->hw_status->status_tag = 0;
7318 tnapi->last_tag = 0;
7319 tnapi->last_irq_tag = 0;
7321 smp_mb();
7323 for (i = 0; i < tp->irq_cnt; i++)
7324 synchronize_irq(tp->napi[i].irq_vec);
7326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7327 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7328 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7331 /* do the reset */
7332 val = GRC_MISC_CFG_CORECLK_RESET;
7334 if (tg3_flag(tp, PCI_EXPRESS)) {
7335 /* Force PCIe 1.0a mode */
7336 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7337 !tg3_flag(tp, 57765_PLUS) &&
7338 tr32(TG3_PCIE_PHY_TSTCTL) ==
7339 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7340 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7342 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7343 tw32(GRC_MISC_CFG, (1 << 29));
7344 val |= (1 << 29);
7348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7349 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7350 tw32(GRC_VCPU_EXT_CTRL,
7351 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7354 /* Manage gphy power for all CPMU absent PCIe devices. */
7355 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7356 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7358 tw32(GRC_MISC_CFG, val);
7360 /* restore 5701 hardware bug workaround write method */
7361 tp->write32 = write_op;
7362
7363 /* Unfortunately, we have to delay before the PCI read back.
7364 * Some 575X chips even will not respond to a PCI cfg access
7365 * when the reset command is given to the chip.
7366 *
7367 * How do these hardware designers expect things to work
7368 * properly if the PCI write is posted for a long period
7369 * of time?  It is always necessary to have some method by
7370 * which a register read back can occur to push the write
7371 * out which does the reset.
7372 *
7373 * For most tg3 variants the trick below was working.
7374 * Ho hum...
7375 */
7376 udelay(120);
7377
7378 /* Flush PCI posted writes.  The normal MMIO registers
7379 * are inaccessible at this time so this is the only
7380 * way to do this reliably (actually, this is no longer
7381 * the case, see above).  I tried to use indirect
7382 * register read/write but this upset some 5701 variants.
7383 */
7384 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7385
7386 udelay(120);
7388 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7389 u16 val16;
7391 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7392 int i;
7393 u32 cfg_val;
7395 /* Wait for link training to complete. */
7396 for (i = 0; i < 5000; i++)
7397 udelay(100);
7399 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7400 pci_write_config_dword(tp->pdev, 0xc4,
7401 cfg_val | (1 << 15));
7404 /* Clear the "no snoop" and "relaxed ordering" bits. */
7405 pci_read_config_word(tp->pdev,
7406 tp->pcie_cap + PCI_EXP_DEVCTL,
7407 &val16);
7408 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7409 PCI_EXP_DEVCTL_NOSNOOP_EN);
7410 /*
7411 * Older PCIe devices only support the 128 byte
7412 * MPS setting.  Enforce the restriction.
7413 */
7414 if (!tg3_flag(tp, CPMU_PRESENT))
7415 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7416 pci_write_config_word(tp->pdev,
7417 tp->pcie_cap + PCI_EXP_DEVCTL,
7418 val16);
7420 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7421
7422 /* Clear error status */
7423 pci_write_config_word(tp->pdev,
7424 tp->pcie_cap + PCI_EXP_DEVSTA,
7425 PCI_EXP_DEVSTA_CED |
7426 PCI_EXP_DEVSTA_NFED |
7427 PCI_EXP_DEVSTA_FED |
7428 PCI_EXP_DEVSTA_URD);
7429 }
7430
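/* Note (editorial, not part of the original driver source): the
 * PCI_EXP_DEVSTA error bits (CED/NFED/FED/URD) are write-1-to-clear,
 * so the config write above discards any error status latched while
 * the link retrained during the reset.
 */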
7431 tg3_restore_pci_state(tp);
7433 tg3_flag_clear(tp, CHIP_RESETTING);
7434 tg3_flag_clear(tp, ERROR_PROCESSED);
7436 val = 0;
7437 if (tg3_flag(tp, 5780_CLASS))
7438 val = tr32(MEMARB_MODE);
7439 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7441 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7442 tg3_stop_fw(tp);
7443 tw32(0x5000, 0x400);
7446 tw32(GRC_MODE, tp->grc_mode);
7448 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7449 val = tr32(0xc4);
7451 tw32(0xc4, val | (1 << 15));
7454 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7456 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7457 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7458 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7459 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7462 if (tg3_flag(tp, ENABLE_APE))
7463 tp->mac_mode = MAC_MODE_APE_TX_EN |
7464 MAC_MODE_APE_RX_EN |
7465 MAC_MODE_TDE_ENABLE;
7467 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7468 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7469 val = tp->mac_mode;
7470 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7471 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7472 val = tp->mac_mode;
7473 } else
7474 val = 0;
7476 tw32_f(MAC_MODE, val);
7477 udelay(40);
7479 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7481 err = tg3_poll_fw(tp);
7482 if (err)
7483 return err;
7485 tg3_mdio_start(tp);
7487 if (tg3_flag(tp, PCI_EXPRESS) &&
7488 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7489 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7490 !tg3_flag(tp, 57765_PLUS)) {
7491 val = tr32(0x7c00);
7493 tw32(0x7c00, val | (1 << 25));
7496 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7497 val = tr32(TG3_CPMU_CLCK_ORIDE);
7498 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7501 /* Reprobe ASF enable state. */
7502 tg3_flag_clear(tp, ENABLE_ASF);
7503 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7504 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7505 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7506 u32 nic_cfg;
7508 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7509 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7510 tg3_flag_set(tp, ENABLE_ASF);
7511 tp->last_event_jiffies = jiffies;
7512 if (tg3_flag(tp, 5750_PLUS))
7513 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7517 return 0;
7520 /* tp->lock is held. */
7521 static void tg3_stop_fw(struct tg3 *tp)
7523 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7524 /* Wait for RX cpu to ACK the previous event. */
7525 tg3_wait_for_event_ack(tp);
7527 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7529 tg3_generate_fw_event(tp);
7531 /* Wait for RX cpu to ACK this event. */
7532 tg3_wait_for_event_ack(tp);
7536 /* tp->lock is held. */
7537 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7539 int err;
7541 tg3_stop_fw(tp);
7543 tg3_write_sig_pre_reset(tp, kind);
7545 tg3_abort_hw(tp, silent);
7546 err = tg3_chip_reset(tp);
7548 __tg3_set_mac_addr(tp, 0);
7550 tg3_write_sig_legacy(tp, kind);
7551 tg3_write_sig_post_reset(tp, kind);
7553 if (err)
7554 return err;
7556 return 0;
7559 #define RX_CPU_SCRATCH_BASE 0x30000
7560 #define RX_CPU_SCRATCH_SIZE 0x04000
7561 #define TX_CPU_SCRATCH_BASE 0x34000
7562 #define TX_CPU_SCRATCH_SIZE 0x04000
7564 /* tp->lock is held. */
7565 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7567 int i;
7569 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7572 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7574 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7575 return 0;
7577 if (offset == RX_CPU_BASE) {
7578 for (i = 0; i < 10000; i++) {
7579 tw32(offset + CPU_STATE, 0xffffffff);
7580 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7581 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7582 break;
7585 tw32(offset + CPU_STATE, 0xffffffff);
7586 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7587 udelay(10);
7588 } else {
7589 for (i = 0; i < 10000; i++) {
7590 tw32(offset + CPU_STATE, 0xffffffff);
7591 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7592 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7593 break;
7597 if (i >= 10000) {
7598 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7599 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7600 return -ENODEV;
7601 }
7602
7603 /* Clear firmware's nvram arbitration. */
7604 if (tg3_flag(tp, NVRAM))
7605 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7606 return 0;
7607 }
7608
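/* Note (editorial, not part of the original driver source): writing
 * SWARB_REQ_CLR0 is believed to release the arbitration request the
 * bootcode holds on the NVRAM interface, so that the halted CPU cannot
 * keep the flash locked against later driver accesses.
 */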
7609 struct fw_info {
7610 unsigned int fw_base;
7611 unsigned int fw_len;
7612 const __be32 *fw_data;
7613 };
7614
7615 /* tp->lock is held. */
7616 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7617 int cpu_scratch_size, struct fw_info *info)
7619 int err, lock_err, i;
7620 void (*write_op)(struct tg3 *, u32, u32);
7622 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7623 netdev_err(tp->dev,
7624 "%s: Trying to load TX cpu firmware which is 5705\n",
7625 __func__);
7626 return -EINVAL;
7629 if (tg3_flag(tp, 5705_PLUS))
7630 write_op = tg3_write_mem;
7631 else
7632 write_op = tg3_write_indirect_reg32;
7633
7634 /* It is possible that bootcode is still loading at this point.
7635 * Get the nvram lock first before halting the cpu.
7636 */
7637 lock_err = tg3_nvram_lock(tp);
7638 err = tg3_halt_cpu(tp, cpu_base);
7639 if (!lock_err)
7640 tg3_nvram_unlock(tp);
7641 if (err)
7642 goto out;
7644 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7645 write_op(tp, cpu_scratch_base + i, 0);
7646 tw32(cpu_base + CPU_STATE, 0xffffffff);
7647 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7648 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7649 write_op(tp, (cpu_scratch_base +
7650 (info->fw_base & 0xffff) +
7651 (i * sizeof(u32))),
7652 be32_to_cpu(info->fw_data[i]));
7654 err = 0;
7656 out:
7657 return err;
7660 /* tp->lock is held. */
7661 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7663 struct fw_info info;
7664 const __be32 *fw_data;
7665 int err, i;
7667 fw_data = (void *)tp->fw->data;
7669 /* Firmware blob starts with version numbers, followed by
7670 start address and length. We are setting complete length.
7671 length = end_address_of_bss - start_address_of_text.
7672 Remainder is the blob to be loaded contiguously
7673 from start address. */
7675 info.fw_base = be32_to_cpu(fw_data[1]);
7676 info.fw_len = tp->fw->size - 12;
7677 info.fw_data = &fw_data[3];
7679 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7680 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7681 &info);
7682 if (err)
7683 return err;
7685 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7686 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7687 &info);
7688 if (err)
7689 return err;
7691 /* Now startup only the RX cpu. */
7692 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7693 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7695 for (i = 0; i < 5; i++) {
7696 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7697 break;
7698 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7699 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7700 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7701 udelay(1000);
7703 if (i >= 5) {
7704 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7705 "should be %08x\n", __func__,
7706 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7707 return -ENODEV;
7709 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7710 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7712 return 0;
7715 /* tp->lock is held. */
7716 static int tg3_load_tso_firmware(struct tg3 *tp)
7718 struct fw_info info;
7719 const __be32 *fw_data;
7720 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7721 int err, i;
7723 if (tg3_flag(tp, HW_TSO_1) ||
7724 tg3_flag(tp, HW_TSO_2) ||
7725 tg3_flag(tp, HW_TSO_3))
7726 return 0;
7728 fw_data = (void *)tp->fw->data;
7730 /* Firmware blob starts with version numbers, followed by
7731 start address and length. We are setting complete length.
7732 length = end_address_of_bss - start_address_of_text.
7733 Remainder is the blob to be loaded contiguously
7734 from start address. */
7736 info.fw_base = be32_to_cpu(fw_data[1]);
7737 cpu_scratch_size = tp->fw_len;
7738 info.fw_len = tp->fw->size - 12;
7739 info.fw_data = &fw_data[3];
7741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7742 cpu_base = RX_CPU_BASE;
7743 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7744 } else {
7745 cpu_base = TX_CPU_BASE;
7746 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7747 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7750 err = tg3_load_firmware_cpu(tp, cpu_base,
7751 cpu_scratch_base, cpu_scratch_size,
7752 &info);
7753 if (err)
7754 return err;
7756 /* Now startup the cpu. */
7757 tw32(cpu_base + CPU_STATE, 0xffffffff);
7758 tw32_f(cpu_base + CPU_PC, info.fw_base);
7760 for (i = 0; i < 5; i++) {
7761 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7762 break;
7763 tw32(cpu_base + CPU_STATE, 0xffffffff);
7764 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7765 tw32_f(cpu_base + CPU_PC, info.fw_base);
7766 udelay(1000);
7768 if (i >= 5) {
7769 netdev_err(tp->dev,
7770 "%s fails to set CPU PC, is %08x should be %08x\n",
7771 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7772 return -ENODEV;
7774 tw32(cpu_base + CPU_STATE, 0xffffffff);
7775 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7776 return 0;
7780 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7782 struct tg3 *tp = netdev_priv(dev);
7783 struct sockaddr *addr = p;
7784 int err = 0, skip_mac_1 = 0;
7786 if (!is_valid_ether_addr(addr->sa_data))
7787 return -EINVAL;
7789 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7791 if (!netif_running(dev))
7792 return 0;
7794 if (tg3_flag(tp, ENABLE_ASF)) {
7795 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7797 addr0_high = tr32(MAC_ADDR_0_HIGH);
7798 addr0_low = tr32(MAC_ADDR_0_LOW);
7799 addr1_high = tr32(MAC_ADDR_1_HIGH);
7800 addr1_low = tr32(MAC_ADDR_1_LOW);
7802 /* Skip MAC addr 1 if ASF is using it. */
7803 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7804 !(addr1_high == 0 && addr1_low == 0))
7805 skip_mac_1 = 1;
7807 spin_lock_bh(&tp->lock);
7808 __tg3_set_mac_addr(tp, skip_mac_1);
7809 spin_unlock_bh(&tp->lock);
7811 return err;
7814 /* tp->lock is held. */
7815 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7816 dma_addr_t mapping, u32 maxlen_flags,
7817 u32 nic_addr)
7819 tg3_write_mem(tp,
7820 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7821 ((u64) mapping >> 32));
7822 tg3_write_mem(tp,
7823 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7824 ((u64) mapping & 0xffffffff));
7825 tg3_write_mem(tp,
7826 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7827 maxlen_flags);
7828
7829 if (!tg3_flag(tp, 5705_PLUS))
7830 tg3_write_mem(tp,
7831 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7832 nic_addr);
7833 }
7834
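/* Note (editorial, not part of the original driver source): each ring
 * control block ("bdinfo") in NIC SRAM is a small fixed layout -- host
 * ring DMA address (high/low 32 bits), a maxlen/flags word, and, on
 * pre-5705 parts only, a NIC-local ring address -- which is exactly
 * the set of words tg3_set_bdinfo() fills in.
 */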
7835 static void __tg3_set_rx_mode(struct net_device *);
7836 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7838 int i;
7840 if (!tg3_flag(tp, ENABLE_TSS)) {
7841 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7842 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7843 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7844 } else {
7845 tw32(HOSTCC_TXCOL_TICKS, 0);
7846 tw32(HOSTCC_TXMAX_FRAMES, 0);
7847 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7850 if (!tg3_flag(tp, ENABLE_RSS)) {
7851 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7852 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7853 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7854 } else {
7855 tw32(HOSTCC_RXCOL_TICKS, 0);
7856 tw32(HOSTCC_RXMAX_FRAMES, 0);
7857 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7860 if (!tg3_flag(tp, 5705_PLUS)) {
7861 u32 val = ec->stats_block_coalesce_usecs;
7863 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7864 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7866 if (!netif_carrier_ok(tp->dev))
7867 val = 0;
7869 tw32(HOSTCC_STAT_COAL_TICKS, val);
7872 for (i = 0; i < tp->irq_cnt - 1; i++) {
7873 u32 reg;
7875 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7876 tw32(reg, ec->rx_coalesce_usecs);
7877 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7878 tw32(reg, ec->rx_max_coalesced_frames);
7879 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7880 tw32(reg, ec->rx_max_coalesced_frames_irq);
7882 if (tg3_flag(tp, ENABLE_TSS)) {
7883 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7884 tw32(reg, ec->tx_coalesce_usecs);
7885 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7886 tw32(reg, ec->tx_max_coalesced_frames);
7887 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7888 tw32(reg, ec->tx_max_coalesced_frames_irq);
7889 }
7890 }
7891
7892 for (; i < tp->irq_max - 1; i++) {
7893 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7894 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7895 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7896
7897 if (tg3_flag(tp, ENABLE_TSS)) {
7898 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7899 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7900 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7901 }
7902 }
7903 }
7904
7905 /* tp->lock is held. */
7906 static void tg3_rings_reset(struct tg3 *tp)
7908 int i;
7909 u32 stblk, txrcb, rxrcb, limit;
7910 struct tg3_napi *tnapi = &tp->napi[0];
7912 /* Disable all transmit rings but the first. */
7913 if (!tg3_flag(tp, 5705_PLUS))
7914 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7915 else if (tg3_flag(tp, 5717_PLUS))
7916 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7917 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7918 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7919 else
7920 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7922 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7923 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7924 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7925 BDINFO_FLAGS_DISABLED);
7928 /* Disable all receive return rings but the first. */
7929 if (tg3_flag(tp, 5717_PLUS))
7930 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7931 else if (!tg3_flag(tp, 5705_PLUS))
7932 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7933 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7935 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7936 else
7937 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7939 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7940 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7941 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7942 BDINFO_FLAGS_DISABLED);
7944 /* Disable interrupts */
7945 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7947 /* Zero mailbox registers. */
7948 if (tg3_flag(tp, SUPPORT_MSIX)) {
7949 for (i = 1; i < tp->irq_max; i++) {
7950 tp->napi[i].tx_prod = 0;
7951 tp->napi[i].tx_cons = 0;
7952 if (tg3_flag(tp, ENABLE_TSS))
7953 tw32_mailbox(tp->napi[i].prodmbox, 0);
7954 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7955 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7957 if (!tg3_flag(tp, ENABLE_TSS))
7958 tw32_mailbox(tp->napi[0].prodmbox, 0);
7959 } else {
7960 tp->napi[0].tx_prod = 0;
7961 tp->napi[0].tx_cons = 0;
7962 tw32_mailbox(tp->napi[0].prodmbox, 0);
7963 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7966 /* Make sure the NIC-based send BD rings are disabled. */
7967 if (!tg3_flag(tp, 5705_PLUS)) {
7968 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7969 for (i = 0; i < 16; i++)
7970 tw32_tx_mbox(mbox + i * 8, 0);
7973 txrcb = NIC_SRAM_SEND_RCB;
7974 rxrcb = NIC_SRAM_RCV_RET_RCB;
7976 /* Clear status block in ram. */
7977 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7979 /* Set status block DMA address */
7980 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7981 ((u64) tnapi->status_mapping >> 32));
7982 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7983 ((u64) tnapi->status_mapping & 0xffffffff));
7985 if (tnapi->tx_ring) {
7986 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7987 (TG3_TX_RING_SIZE <<
7988 BDINFO_FLAGS_MAXLEN_SHIFT),
7989 NIC_SRAM_TX_BUFFER_DESC);
7990 txrcb += TG3_BDINFO_SIZE;
7993 if (tnapi->rx_rcb) {
7994 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7995 (tp->rx_ret_ring_mask + 1) <<
7996 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7997 rxrcb += TG3_BDINFO_SIZE;
8000 stblk = HOSTCC_STATBLCK_RING1;
8002 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8003 u64 mapping = (u64)tnapi->status_mapping;
8004 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8005 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8007 /* Clear status block in ram. */
8008 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8010 if (tnapi->tx_ring) {
8011 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8012 (TG3_TX_RING_SIZE <<
8013 BDINFO_FLAGS_MAXLEN_SHIFT),
8014 NIC_SRAM_TX_BUFFER_DESC);
8015 txrcb += TG3_BDINFO_SIZE;
8018 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8019 ((tp->rx_ret_ring_mask + 1) <<
8020 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8022 stblk += 8;
8023 rxrcb += TG3_BDINFO_SIZE;
8027 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8029 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8031 if (!tg3_flag(tp, 5750_PLUS) ||
8032 tg3_flag(tp, 5780_CLASS) ||
8033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8035 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8036 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8038 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8039 else
8040 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8042 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8043 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8045 val = min(nic_rep_thresh, host_rep_thresh);
8046 tw32(RCVBDI_STD_THRESH, val);
8048 if (tg3_flag(tp, 57765_PLUS))
8049 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8051 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8052 return;
8054 if (!tg3_flag(tp, 5705_PLUS))
8055 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8056 else
8057 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8059 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8061 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8062 tw32(RCVBDI_JUMBO_THRESH, val);
8063
8064 if (tg3_flag(tp, 57765_PLUS))
8065 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8066 }
8067
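/* Note (editorial, not part of the original driver source): the
 * RCVBDI_*_THRESH values program how far an on-chip BD cache may drain
 * before the chip DMA-fetches more descriptors from the host ring; the
 * min()/max_t() arithmetic above clamps the threshold to the smaller of
 * half the cache size (capped by rx_std_max_post) and roughly an eighth
 * of the configured ring depth, with a floor of 1.
 */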
8068 /* tp->lock is held. */
8069 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8071 u32 val, rdmac_mode;
8072 int i, err, limit;
8073 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8075 tg3_disable_ints(tp);
8077 tg3_stop_fw(tp);
8079 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8081 if (tg3_flag(tp, INIT_COMPLETE))
8082 tg3_abort_hw(tp, 1);
8084 /* Enable MAC control of LPI */
8085 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8086 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8087 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8088 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8090 tw32_f(TG3_CPMU_EEE_CTRL,
8091 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8093 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8094 TG3_CPMU_EEEMD_LPI_IN_TX |
8095 TG3_CPMU_EEEMD_LPI_IN_RX |
8096 TG3_CPMU_EEEMD_EEE_ENABLE;
8098 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8099 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8101 if (tg3_flag(tp, ENABLE_APE))
8102 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8104 tw32_f(TG3_CPMU_EEE_MODE, val);
8106 tw32_f(TG3_CPMU_EEE_DBTMR1,
8107 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8108 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8110 tw32_f(TG3_CPMU_EEE_DBTMR2,
8111 TG3_CPMU_DBTMR2_APE_TX_2047US |
8112 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8115 if (reset_phy)
8116 tg3_phy_reset(tp);
8118 err = tg3_chip_reset(tp);
8119 if (err)
8120 return err;
8122 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8124 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8125 val = tr32(TG3_CPMU_CTRL);
8126 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8127 tw32(TG3_CPMU_CTRL, val);
8129 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8130 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8131 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8132 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8134 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8135 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8136 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8137 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8139 val = tr32(TG3_CPMU_HST_ACC);
8140 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8141 val |= CPMU_HST_ACC_MACCLK_6_25;
8142 tw32(TG3_CPMU_HST_ACC, val);
8145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8146 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8147 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8148 PCIE_PWR_MGMT_L1_THRESH_4MS;
8149 tw32(PCIE_PWR_MGMT_THRESH, val);
8151 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8152 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8154 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8156 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8157 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8160 if (tg3_flag(tp, L1PLLPD_EN)) {
8161 u32 grc_mode = tr32(GRC_MODE);
8163 /* Access the lower 1K of PL PCIE block registers. */
8164 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8165 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8167 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8168 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8169 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8171 tw32(GRC_MODE, grc_mode);
8174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8175 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8176 u32 grc_mode = tr32(GRC_MODE);
8178 /* Access the lower 1K of PL PCIE block registers. */
8179 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8180 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8182 val = tr32(TG3_PCIE_TLDLPL_PORT +
8183 TG3_PCIE_PL_LO_PHYCTL5);
8184 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8185 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8187 tw32(GRC_MODE, grc_mode);
8190 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8191 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8192 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8193 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8194 }
8195
8196 /* This works around an issue with Athlon chipsets on
8197 * B3 tigon3 silicon.  This bit has no effect on any
8198 * other revision.  But do not set this on PCI Express
8199 * chips and don't even touch the clocks if the CPMU is present.
8200 */
8201 if (!tg3_flag(tp, CPMU_PRESENT)) {
8202 if (!tg3_flag(tp, PCI_EXPRESS))
8203 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8204 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8207 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8208 tg3_flag(tp, PCIX_MODE)) {
8209 val = tr32(TG3PCI_PCISTATE);
8210 val |= PCISTATE_RETRY_SAME_DMA;
8211 tw32(TG3PCI_PCISTATE, val);
8214 if (tg3_flag(tp, ENABLE_APE)) {
8215 /* Allow reads and writes to the
8216 * APE register and memory space.
8217 */
8218 val = tr32(TG3PCI_PCISTATE);
8219 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8220 PCISTATE_ALLOW_APE_SHMEM_WR |
8221 PCISTATE_ALLOW_APE_PSPACE_WR;
8222 tw32(TG3PCI_PCISTATE, val);
8225 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8226 /* Enable some hw fixes. */
8227 val = tr32(TG3PCI_MSI_DATA);
8228 val |= (1 << 26) | (1 << 28) | (1 << 29);
8229 tw32(TG3PCI_MSI_DATA, val);
8230 }
8231
8232 /* Descriptor ring init may make accesses to the
8233 * NIC SRAM area to setup the TX descriptors, so we
8234 * can only do this after the hardware has been
8235 * successfully reset.
8236 */
8237 err = tg3_init_rings(tp);
8238 if (err)
8239 return err;
8241 if (tg3_flag(tp, 57765_PLUS)) {
8242 val = tr32(TG3PCI_DMA_RW_CTRL) &
8243 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8244 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8245 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8246 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8247 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8248 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8249 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8250 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8251 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8252 /* This value is determined during the probe time DMA
8253 * engine test, tg3_test_dma.
8254 */
8255 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8258 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8259 GRC_MODE_4X_NIC_SEND_RINGS |
8260 GRC_MODE_NO_TX_PHDR_CSUM |
8261 GRC_MODE_NO_RX_PHDR_CSUM);
8262 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8264 /* Pseudo-header checksum is done by hardware logic and not
8265 * the offload processors, so make the chip do the pseudo-
8266 * header checksums on receive. For transmit it is more
8267 * convenient to do the pseudo-header checksum in software
8268 * as Linux does that on transmit for us in all cases.
8269 */
8270 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8272 tw32(GRC_MODE,
8273 tp->grc_mode |
8274 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8276 /* Set up the timer prescaler register. The clock is always 66 MHz. */
8277 val = tr32(GRC_MISC_CFG);
8278 val &= ~0xff;
8279 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8280 tw32(GRC_MISC_CFG, val);
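/* Illustrative note: with the clock fixed at 66 MHz, the prescaler value
 * 65 presumably divides by 65 + 1 = 66, giving the GRC timer a 1 MHz
 * rate (one tick per microsecond).
 */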
8282 /* Initialize MBUF/DESC pool. */
8283 if (tg3_flag(tp, 5750_PLUS)) {
8284 /* Do nothing. */
8285 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8286 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8288 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8289 else
8290 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8291 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8292 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8293 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8294 int fw_len;
8296 fw_len = tp->fw_len;
8297 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8298 tw32(BUFMGR_MB_POOL_ADDR,
8299 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8300 tw32(BUFMGR_MB_POOL_SIZE,
8301 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8304 if (tp->dev->mtu <= ETH_DATA_LEN) {
8305 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8306 tp->bufmgr_config.mbuf_read_dma_low_water);
8307 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8308 tp->bufmgr_config.mbuf_mac_rx_low_water);
8309 tw32(BUFMGR_MB_HIGH_WATER,
8310 tp->bufmgr_config.mbuf_high_water);
8311 } else {
8312 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8313 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8314 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8315 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8316 tw32(BUFMGR_MB_HIGH_WATER,
8317 tp->bufmgr_config.mbuf_high_water_jumbo);
8319 tw32(BUFMGR_DMA_LOW_WATER,
8320 tp->bufmgr_config.dma_low_water);
8321 tw32(BUFMGR_DMA_HIGH_WATER,
8322 tp->bufmgr_config.dma_high_water);
8324 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8326 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8327 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8328 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8329 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8330 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8331 tw32(BUFMGR_MODE, val);
8332 for (i = 0; i < 2000; i++) {
8333 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8334 break;
8335 udelay(10);
8337 if (i >= 2000) {
8338 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8339 return -ENODEV;
8342 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8343 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8345 tg3_setup_rxbd_thresholds(tp);
8347 /* Initialize TG3_BDINFO's at:
8348 * RCVDBDI_STD_BD: standard eth size rx ring
8349 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8350 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8351 *
8352 * like so:
8353 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8354 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8355 * ring attribute flags
8356 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8357 *
8358 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8359 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8360 *
8361 * The size of each ring is fixed in the firmware, but the location is
8362 * configurable.
8363 */
8364 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8365 ((u64) tpr->rx_std_mapping >> 32));
8366 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8367 ((u64) tpr->rx_std_mapping & 0xffffffff));
8368 if (!tg3_flag(tp, 5717_PLUS))
8369 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8370 NIC_SRAM_RX_BUFFER_DESC);
8372 /* Disable the mini ring */
8373 if (!tg3_flag(tp, 5705_PLUS))
8374 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8375 BDINFO_FLAGS_DISABLED);
8377 /* Program the jumbo buffer descriptor ring control
8378 * blocks on those devices that have them.
8379 */
8380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8381 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8383 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8384 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8385 ((u64) tpr->rx_jmb_mapping >> 32));
8386 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8387 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8388 val = TG3_RX_JMB_RING_SIZE(tp) <<
8389 BDINFO_FLAGS_MAXLEN_SHIFT;
8390 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8391 val | BDINFO_FLAGS_USE_EXT_RECV);
8392 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8393 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8394 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8395 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8396 } else {
8397 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8398 BDINFO_FLAGS_DISABLED);
8401 if (tg3_flag(tp, 57765_PLUS)) {
8402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8403 val = TG3_RX_STD_MAX_SIZE_5700;
8404 else
8405 val = TG3_RX_STD_MAX_SIZE_5717;
8406 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8407 val |= (TG3_RX_STD_DMA_SZ << 2);
8408 } else
8409 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8410 } else
8411 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8413 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8415 tpr->rx_std_prod_idx = tp->rx_pending;
8416 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8418 tpr->rx_jmb_prod_idx =
8419 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8420 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8422 tg3_rings_reset(tp);
8424 /* Initialize MAC address and backoff seed. */
8425 __tg3_set_mac_addr(tp, 0);
8427 /* MTU + ethernet header + FCS + optional VLAN tag */
8428 tw32(MAC_RX_MTU_SIZE,
8429 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
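/* Worked example: with the default 1500-byte MTU this programs
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522,
 * the classic maximum length of a VLAN-tagged Ethernet frame.
 */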
8431 /* The slot time is changed by tg3_setup_phy if we
8432 * run at gigabit with half duplex.
8433 */
8434 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8435 (6 << TX_LENGTHS_IPG_SHIFT) |
8436 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8439 val |= tr32(MAC_TX_LENGTHS) &
8440 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8441 TX_LENGTHS_CNT_DWN_VAL_MSK);
8443 tw32(MAC_TX_LENGTHS, val);
8445 /* Receive rules. */
8446 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8447 tw32(RCVLPC_CONFIG, 0x0181);
8449 /* Calculate RDMAC_MODE setting early; we need it to determine
8450 * the RCVLPC_STATE_ENABLE mask.
8451 */
8452 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8453 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8454 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8455 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8456 RDMAC_MODE_LNGREAD_ENAB);
8458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8459 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8461 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8462 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8463 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8464 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8465 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8466 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8469 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8470 if (tg3_flag(tp, TSO_CAPABLE) &&
8471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8472 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8473 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8474 !tg3_flag(tp, IS_5788)) {
8475 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8479 if (tg3_flag(tp, PCI_EXPRESS))
8480 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8482 if (tg3_flag(tp, HW_TSO_1) ||
8483 tg3_flag(tp, HW_TSO_2) ||
8484 tg3_flag(tp, HW_TSO_3))
8485 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8487 if (tg3_flag(tp, HW_TSO_3) ||
8488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8490 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8493 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8497 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8498 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8499 tg3_flag(tp, 57765_PLUS)) {
8500 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8502 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8503 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8504 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8505 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8506 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8507 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8508 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8510 tw32(TG3_RDMA_RSRVCTRL_REG,
8511 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8516 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8517 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8518 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8519 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8522 /* Receive/send statistics. */
8523 if (tg3_flag(tp, 5750_PLUS)) {
8524 val = tr32(RCVLPC_STATS_ENABLE);
8525 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8526 tw32(RCVLPC_STATS_ENABLE, val);
8527 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8528 tg3_flag(tp, TSO_CAPABLE)) {
8529 val = tr32(RCVLPC_STATS_ENABLE);
8530 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8531 tw32(RCVLPC_STATS_ENABLE, val);
8532 } else {
8533 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8535 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8536 tw32(SNDDATAI_STATSENAB, 0xffffff);
8537 tw32(SNDDATAI_STATSCTRL,
8538 (SNDDATAI_SCTRL_ENABLE |
8539 SNDDATAI_SCTRL_FASTUPD));
8541 /* Setup host coalescing engine. */
8542 tw32(HOSTCC_MODE, 0);
8543 for (i = 0; i < 2000; i++) {
8544 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8545 break;
8546 udelay(10);
8549 __tg3_set_coalesce(tp, &tp->coal);
8551 if (!tg3_flag(tp, 5705_PLUS)) {
8552 /* Status/statistics block address. See tg3_timer,
8553 * the tg3_periodic_fetch_stats call there, and
8554 * tg3_get_stats to see how this works for 5705/5750 chips.
8555 */
8556 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8557 ((u64) tp->stats_mapping >> 32));
8558 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8559 ((u64) tp->stats_mapping & 0xffffffff));
8560 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8562 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8564 /* Clear statistics and status block memory areas */
8565 for (i = NIC_SRAM_STATS_BLK;
8566 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8567 i += sizeof(u32)) {
8568 tg3_write_mem(tp, i, 0);
8569 udelay(40);
8573 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8575 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8576 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8577 if (!tg3_flag(tp, 5705_PLUS))
8578 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8580 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8581 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8582 /* reset to prevent losing 1st rx packet intermittently */
8583 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8584 udelay(10);
8587 if (tg3_flag(tp, ENABLE_APE))
8588 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8589 else
8590 tp->mac_mode = 0;
8591 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8592 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8593 if (!tg3_flag(tp, 5705_PLUS) &&
8594 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8595 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8596 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8597 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8598 udelay(40);
8600 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8601 * If TG3_FLAG_IS_NIC is zero, we should read the
8602 * register to preserve the GPIO settings for LOMs. The GPIOs,
8603 * whether used as inputs or outputs, are set by boot code after
8604 * reset.
8605 */
8606 if (!tg3_flag(tp, IS_NIC)) {
8607 u32 gpio_mask;
8609 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8610 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8611 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8614 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8615 GRC_LCLCTRL_GPIO_OUTPUT3;
8617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8618 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8620 tp->grc_local_ctrl &= ~gpio_mask;
8621 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8623 /* GPIO1 must be driven high for eeprom write protect */
8624 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8625 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8626 GRC_LCLCTRL_GPIO_OUTPUT1);
8628 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8629 udelay(100);
8631 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8632 val = tr32(MSGINT_MODE);
8633 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8634 tw32(MSGINT_MODE, val);
8637 if (!tg3_flag(tp, 5705_PLUS)) {
8638 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8639 udelay(40);
8642 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8643 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8644 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8645 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8646 WDMAC_MODE_LNGREAD_ENAB);
8648 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8649 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8650 if (tg3_flag(tp, TSO_CAPABLE) &&
8651 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8652 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8653 /* nothing */
8654 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8655 !tg3_flag(tp, IS_5788)) {
8656 val |= WDMAC_MODE_RX_ACCEL;
8660 /* Enable host coalescing bug fix */
8661 if (tg3_flag(tp, 5755_PLUS))
8662 val |= WDMAC_MODE_STATUS_TAG_FIX;
8664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8665 val |= WDMAC_MODE_BURST_ALL_DATA;
8667 tw32_f(WDMAC_MODE, val);
8668 udelay(40);
8670 if (tg3_flag(tp, PCIX_MODE)) {
8671 u16 pcix_cmd;
8673 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8674 &pcix_cmd);
8675 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8676 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8677 pcix_cmd |= PCI_X_CMD_READ_2K;
8678 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8679 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8680 pcix_cmd |= PCI_X_CMD_READ_2K;
8682 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8683 pcix_cmd);
8686 tw32_f(RDMAC_MODE, rdmac_mode);
8687 udelay(40);
8689 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8690 if (!tg3_flag(tp, 5705_PLUS))
8691 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8694 tw32(SNDDATAC_MODE,
8695 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8696 else
8697 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8699 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8700 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8701 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8702 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8703 val |= RCVDBDI_MODE_LRG_RING_SZ;
8704 tw32(RCVDBDI_MODE, val);
8705 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8706 if (tg3_flag(tp, HW_TSO_1) ||
8707 tg3_flag(tp, HW_TSO_2) ||
8708 tg3_flag(tp, HW_TSO_3))
8709 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8710 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8711 if (tg3_flag(tp, ENABLE_TSS))
8712 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8713 tw32(SNDBDI_MODE, val);
8714 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8716 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8717 err = tg3_load_5701_a0_firmware_fix(tp);
8718 if (err)
8719 return err;
8722 if (tg3_flag(tp, TSO_CAPABLE)) {
8723 err = tg3_load_tso_firmware(tp);
8724 if (err)
8725 return err;
8728 tp->tx_mode = TX_MODE_ENABLE;
8730 if (tg3_flag(tp, 5755_PLUS) ||
8731 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8732 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8734 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8735 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8736 tp->tx_mode &= ~val;
8737 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8740 tw32_f(MAC_TX_MODE, tp->tx_mode);
8741 udelay(100);
8743 if (tg3_flag(tp, ENABLE_RSS)) {
8744 u32 reg = MAC_RSS_INDIR_TBL_0;
8745 u8 *ent = (u8 *)&val;
8747 /* Setup the indirection table */
8748 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8749 int idx = i % sizeof(val);
8751 ent[idx] = i % (tp->irq_cnt - 1);
8752 if (idx == sizeof(val) - 1) {
8753 tw32(reg, val);
8754 reg += 4;
8758 /* Setup the "secret" hash key. */
8759 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8760 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8761 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8762 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8763 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8764 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8765 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8766 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8767 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8768 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
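/* Illustrative note: each indirection-table entry above is one byte,
 * packed four per 32-bit register, and entries are assigned round-robin
 * across the rx rings (tp->irq_cnt - 1 of them, since vector 0 is
 * reserved for link events); with 4 rx rings the table reads
 * 0,1,2,3,0,1,2,3,... The hash key is a fixed constant rather than a
 * per-boot random value.
 */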
8771 tp->rx_mode = RX_MODE_ENABLE;
8772 if (tg3_flag(tp, 5755_PLUS))
8773 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8775 if (tg3_flag(tp, ENABLE_RSS))
8776 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8777 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8778 RX_MODE_RSS_IPV6_HASH_EN |
8779 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8780 RX_MODE_RSS_IPV4_HASH_EN |
8781 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8783 tw32_f(MAC_RX_MODE, tp->rx_mode);
8784 udelay(10);
8786 tw32(MAC_LED_CTRL, tp->led_ctrl);
8788 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8789 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8790 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8791 udelay(10);
8793 tw32_f(MAC_RX_MODE, tp->rx_mode);
8794 udelay(10);
8796 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8797 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8798 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8799 /* Set drive transmission level to 1.2V */
8800 /* only if the signal pre-emphasis bit is not set */
8801 val = tr32(MAC_SERDES_CFG);
8802 val &= 0xfffff000;
8803 val |= 0x880;
8804 tw32(MAC_SERDES_CFG, val);
8806 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8807 tw32(MAC_SERDES_CFG, 0x616000);
8810 /* Prevent chip from dropping frames when flow control
8811 * is enabled.
8812 */
8813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8814 val = 1;
8815 else
8816 val = 2;
8817 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8819 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8820 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8821 /* Use hardware link auto-negotiation */
8822 tg3_flag_set(tp, HW_AUTONEG);
8825 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8826 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8827 u32 tmp;
8829 tmp = tr32(SERDES_RX_CTRL);
8830 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8831 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8832 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8833 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8836 if (!tg3_flag(tp, USE_PHYLIB)) {
8837 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8838 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8839 tp->link_config.speed = tp->link_config.orig_speed;
8840 tp->link_config.duplex = tp->link_config.orig_duplex;
8841 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8844 err = tg3_setup_phy(tp, 0);
8845 if (err)
8846 return err;
8848 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8849 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8850 u32 tmp;
8852 /* Clear CRC stats. */
8853 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8854 tg3_writephy(tp, MII_TG3_TEST1,
8855 tmp | MII_TG3_TEST1_CRC_EN);
8856 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8861 __tg3_set_rx_mode(tp->dev);
8863 /* Initialize receive rules. */
8864 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8865 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8866 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8867 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8869 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8870 limit = 8;
8871 else
8872 limit = 16;
8873 if (tg3_flag(tp, ENABLE_ASF))
8874 limit -= 4;
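/* Illustrative note: the hardware provides 16 receive-rule slots (only
 * 8 are used on 5705-and-later parts outside the 5780 class), and when
 * ASF firmware is active the last 4 slots are presumably reserved for
 * it; the switch below clears only the slots left to the driver.
 */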
8875 switch (limit) {
8876 case 16:
8877 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8878 case 15:
8879 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8880 case 14:
8881 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8882 case 13:
8883 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8884 case 12:
8885 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8886 case 11:
8887 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8888 case 10:
8889 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8890 case 9:
8891 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8892 case 8:
8893 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8894 case 7:
8895 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8896 case 6:
8897 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8898 case 5:
8899 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8900 case 4:
8901 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8902 case 3:
8903 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8904 case 2:
8905 case 1:
8907 default:
8908 break;
8911 if (tg3_flag(tp, ENABLE_APE))
8912 /* Write our heartbeat update interval to APE. */
8913 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8914 APE_HOST_HEARTBEAT_INT_DISABLE);
8916 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8918 return 0;
8921 /* Called at device open time to get the chip ready for
8922 * packet processing. Invoked with tp->lock held.
8923 */
8924 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8926 tg3_switch_clocks(tp);
8928 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8930 return tg3_reset_hw(tp, reset_phy);
8933 #define TG3_STAT_ADD32(PSTAT, REG) \
8934 do { u32 __val = tr32(REG); \
8935 (PSTAT)->low += __val; \
8936 if ((PSTAT)->low < __val) \
8937 (PSTAT)->high += 1; \
8938 } while (0)
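/* Illustrative note on TG3_STAT_ADD32: it folds a 32-bit hardware
 * counter sample into a 64-bit software counter. After "low += val",
 * unsigned wraparound is detected by "low < val" (the sum ended up
 * smaller than the value just added), and the high word is then
 * incremented to propagate the carry.
 */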
8940 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8942 struct tg3_hw_stats *sp = tp->hw_stats;
8944 if (!netif_carrier_ok(tp->dev))
8945 return;
8947 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8948 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8949 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8950 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8951 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8952 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8953 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8954 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8955 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8956 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8957 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8958 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8959 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8961 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8962 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8963 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8964 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8965 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8966 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8967 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8968 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8969 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8970 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8971 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8972 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8973 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8974 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8976 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8977 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8978 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8979 } else {
8980 u32 val = tr32(HOSTCC_FLOW_ATTN);
8981 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8982 if (val) {
8983 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8984 sp->rx_discards.low += val;
8985 if (sp->rx_discards.low < val)
8986 sp->rx_discards.high += 1;
8988 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8990 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8993 static void tg3_timer(unsigned long __opaque)
8995 struct tg3 *tp = (struct tg3 *) __opaque;
8997 if (tp->irq_sync)
8998 goto restart_timer;
9000 spin_lock(&tp->lock);
9002 if (!tg3_flag(tp, TAGGED_STATUS)) {
9003 /* All of this garbage exists because, when using non-tagged
9004 * IRQ status, the mailbox/status_block protocol the chip
9005 * uses with the CPU is race prone.
9006 */
9007 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9008 tw32(GRC_LOCAL_CTRL,
9009 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9010 } else {
9011 tw32(HOSTCC_MODE, tp->coalesce_mode |
9012 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9015 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9016 tg3_flag_set(tp, RESTART_TIMER);
9017 spin_unlock(&tp->lock);
9018 schedule_work(&tp->reset_task);
9019 return;
9023 /* This part only runs once per second. */
9024 if (!--tp->timer_counter) {
9025 if (tg3_flag(tp, 5705_PLUS))
9026 tg3_periodic_fetch_stats(tp);
9028 if (tp->setlpicnt && !--tp->setlpicnt) {
9029 u32 val = tr32(TG3_CPMU_EEE_MODE);
9030 tw32(TG3_CPMU_EEE_MODE,
9031 val | TG3_CPMU_EEEMD_LPI_ENABLE);
9034 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9035 u32 mac_stat;
9036 int phy_event;
9038 mac_stat = tr32(MAC_STATUS);
9040 phy_event = 0;
9041 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9042 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9043 phy_event = 1;
9044 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9045 phy_event = 1;
9047 if (phy_event)
9048 tg3_setup_phy(tp, 0);
9049 } else if (tg3_flag(tp, POLL_SERDES)) {
9050 u32 mac_stat = tr32(MAC_STATUS);
9051 int need_setup = 0;
9053 if (netif_carrier_ok(tp->dev) &&
9054 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9055 need_setup = 1;
9057 if (!netif_carrier_ok(tp->dev) &&
9058 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9059 MAC_STATUS_SIGNAL_DET))) {
9060 need_setup = 1;
9062 if (need_setup) {
9063 if (!tp->serdes_counter) {
9064 tw32_f(MAC_MODE,
9065 (tp->mac_mode &
9066 ~MAC_MODE_PORT_MODE_MASK));
9067 udelay(40);
9068 tw32_f(MAC_MODE, tp->mac_mode);
9069 udelay(40);
9071 tg3_setup_phy(tp, 0);
9073 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9074 tg3_flag(tp, 5780_CLASS)) {
9075 tg3_serdes_parallel_detect(tp);
9078 tp->timer_counter = tp->timer_multiplier;
9081 /* Heartbeat is only sent once every 2 seconds.
9082 *
9083 * The heartbeat is to tell the ASF firmware that the host
9084 * driver is still alive. In the event that the OS crashes,
9085 * ASF needs to reset the hardware to free up the FIFO space
9086 * that may be filled with rx packets destined for the host.
9087 * If the FIFO is full, ASF will no longer function properly.
9088 *
9089 * Unintended resets have been reported on real-time kernels
9090 * where the timer doesn't run on time. Netpoll will also have
9091 * the same problem.
9092 *
9093 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9094 * to check the ring condition when the heartbeat is expiring
9095 * before doing the reset. This will prevent most unintended
9096 * resets.
9097 */
9098 if (!--tp->asf_counter) {
9099 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9100 tg3_wait_for_event_ack(tp);
9102 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9103 FWCMD_NICDRV_ALIVE3);
9104 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9105 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9106 TG3_FW_UPDATE_TIMEOUT_SEC);
9108 tg3_generate_fw_event(tp);
9110 tp->asf_counter = tp->asf_multiplier;
9113 spin_unlock(&tp->lock);
9115 restart_timer:
9116 tp->timer.expires = jiffies + tp->timer_offset;
9117 add_timer(&tp->timer);
9120 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9122 irq_handler_t fn;
9123 unsigned long flags;
9124 char *name;
9125 struct tg3_napi *tnapi = &tp->napi[irq_num];
9127 if (tp->irq_cnt == 1)
9128 name = tp->dev->name;
9129 else {
9130 name = &tnapi->irq_lbl[0];
9131 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9132 name[IFNAMSIZ-1] = 0;
9135 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9136 fn = tg3_msi;
9137 if (tg3_flag(tp, 1SHOT_MSI))
9138 fn = tg3_msi_1shot;
9139 flags = 0;
9140 } else {
9141 fn = tg3_interrupt;
9142 if (tg3_flag(tp, TAGGED_STATUS))
9143 fn = tg3_interrupt_tagged;
9144 flags = IRQF_SHARED;
9147 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9150 static int tg3_test_interrupt(struct tg3 *tp)
9152 struct tg3_napi *tnapi = &tp->napi[0];
9153 struct net_device *dev = tp->dev;
9154 int err, i, intr_ok = 0;
9155 u32 val;
9157 if (!netif_running(dev))
9158 return -ENODEV;
9160 tg3_disable_ints(tp);
9162 free_irq(tnapi->irq_vec, tnapi);
9164 /*
9165 * Turn off MSI one shot mode. Otherwise this test has no
9166 * observable way to know whether the interrupt was delivered.
9167 */
9168 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9169 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9170 tw32(MSGINT_MODE, val);
9173 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9174 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9175 if (err)
9176 return err;
9178 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9179 tg3_enable_ints(tp);
9181 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9182 tnapi->coal_now);
9184 for (i = 0; i < 5; i++) {
9185 u32 int_mbox, misc_host_ctrl;
9187 int_mbox = tr32_mailbox(tnapi->int_mbox);
9188 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9190 if ((int_mbox != 0) ||
9191 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9192 intr_ok = 1;
9193 break;
9196 msleep(10);
9199 tg3_disable_ints(tp);
9201 free_irq(tnapi->irq_vec, tnapi);
9203 err = tg3_request_irq(tp, 0);
9205 if (err)
9206 return err;
9208 if (intr_ok) {
9209 /* Re-enable MSI one-shot mode. */
9210 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9211 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9212 tw32(MSGINT_MODE, val);
9214 return 0;
9217 return -EIO;
9220 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
9221 * but INTx mode is successfully restored.
9222 */
9223 static int tg3_test_msi(struct tg3 *tp)
9225 int err;
9226 u16 pci_cmd;
9228 if (!tg3_flag(tp, USING_MSI))
9229 return 0;
9231 /* Turn off SERR reporting in case MSI terminates with Master
9232 * Abort.
9233 */
9234 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9235 pci_write_config_word(tp->pdev, PCI_COMMAND,
9236 pci_cmd & ~PCI_COMMAND_SERR);
9238 err = tg3_test_interrupt(tp);
9240 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9242 if (!err)
9243 return 0;
9245 /* other failures */
9246 if (err != -EIO)
9247 return err;
9249 /* MSI test failed, go back to INTx mode */
9250 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9251 "to INTx mode. Please report this failure to the PCI "
9252 "maintainer and include system chipset information\n");
9254 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9256 pci_disable_msi(tp->pdev);
9258 tg3_flag_clear(tp, USING_MSI);
9259 tp->napi[0].irq_vec = tp->pdev->irq;
9261 err = tg3_request_irq(tp, 0);
9262 if (err)
9263 return err;
9265 /* Need to reset the chip because the MSI cycle may have terminated
9266 * with Master Abort.
9267 */
9268 tg3_full_lock(tp, 1);
9270 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9271 err = tg3_init_hw(tp, 1);
9273 tg3_full_unlock(tp);
9275 if (err)
9276 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9278 return err;
9281 static int tg3_request_firmware(struct tg3 *tp)
9283 const __be32 *fw_data;
9285 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9286 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9287 tp->fw_needed);
9288 return -ENOENT;
9291 fw_data = (void *)tp->fw->data;
9293 /* Firmware blob starts with version numbers, followed by
9294 * start address and _full_ length including BSS sections
9295 * (which must be longer than the actual data, of course).
9296 */
9298 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9299 if (tp->fw_len < (tp->fw->size - 12)) {
9300 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9301 tp->fw_len, tp->fw_needed);
9302 release_firmware(tp->fw);
9303 tp->fw = NULL;
9304 return -EINVAL;
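/* Illustrative note: the 12 bytes in the check above are the three
 * __be32 header words (version, start address, full length) that
 * precede the firmware payload in the blob.
 */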
9307 /* We no longer need firmware; we have it. */
9308 tp->fw_needed = NULL;
9309 return 0;
9312 static bool tg3_enable_msix(struct tg3 *tp)
9314 int i, rc, cpus = num_online_cpus();
9315 struct msix_entry msix_ent[tp->irq_max];
9317 if (cpus == 1)
9318 /* Just fall back to the simpler MSI mode. */
9319 return false;
9321 /*
9322 * We want as many rx rings enabled as there are cpus.
9323 * The first MSIX vector only deals with link interrupts, etc,
9324 * so we add one to the number of vectors we are requesting.
9325 */
9326 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
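/* Worked example: with 4 online CPUs and tp->irq_max >= 5, this
 * requests 5 vectors -- one for link/error events plus one per rx ring.
 */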
9328 for (i = 0; i < tp->irq_max; i++) {
9329 msix_ent[i].entry = i;
9330 msix_ent[i].vector = 0;
9333 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9334 if (rc < 0) {
9335 return false;
9336 } else if (rc != 0) {
9337 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9338 return false;
9339 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9340 tp->irq_cnt, rc);
9341 tp->irq_cnt = rc;
9344 for (i = 0; i < tp->irq_max; i++)
9345 tp->napi[i].irq_vec = msix_ent[i].vector;
9347 netif_set_real_num_tx_queues(tp->dev, 1);
9348 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9349 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9350 pci_disable_msix(tp->pdev);
9351 return false;
9354 if (tp->irq_cnt > 1) {
9355 tg3_flag_set(tp, ENABLE_RSS);
9357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9359 tg3_flag_set(tp, ENABLE_TSS);
9360 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9364 return true;
9367 static void tg3_ints_init(struct tg3 *tp)
9369 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9370 !tg3_flag(tp, TAGGED_STATUS)) {
9371 /* All MSI supporting chips should support tagged
9372 * status. Assert that this is the case.
9373 */
9374 netdev_warn(tp->dev,
9375 "MSI without TAGGED_STATUS? Not using MSI\n");
9376 goto defcfg;
9379 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9380 tg3_flag_set(tp, USING_MSIX);
9381 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9382 tg3_flag_set(tp, USING_MSI);
9384 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9385 u32 msi_mode = tr32(MSGINT_MODE);
9386 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9387 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9388 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9390 defcfg:
9391 if (!tg3_flag(tp, USING_MSIX)) {
9392 tp->irq_cnt = 1;
9393 tp->napi[0].irq_vec = tp->pdev->irq;
9394 netif_set_real_num_tx_queues(tp->dev, 1);
9395 netif_set_real_num_rx_queues(tp->dev, 1);
9399 static void tg3_ints_fini(struct tg3 *tp)
9401 if (tg3_flag(tp, USING_MSIX))
9402 pci_disable_msix(tp->pdev);
9403 else if (tg3_flag(tp, USING_MSI))
9404 pci_disable_msi(tp->pdev);
9405 tg3_flag_clear(tp, USING_MSI);
9406 tg3_flag_clear(tp, USING_MSIX);
9407 tg3_flag_clear(tp, ENABLE_RSS);
9408 tg3_flag_clear(tp, ENABLE_TSS);
9411 static int tg3_open(struct net_device *dev)
9413 struct tg3 *tp = netdev_priv(dev);
9414 int i, err;
9416 if (tp->fw_needed) {
9417 err = tg3_request_firmware(tp);
9418 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9419 if (err)
9420 return err;
9421 } else if (err) {
9422 netdev_warn(tp->dev, "TSO capability disabled\n");
9423 tg3_flag_clear(tp, TSO_CAPABLE);
9424 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9425 netdev_notice(tp->dev, "TSO capability restored\n");
9426 tg3_flag_set(tp, TSO_CAPABLE);
9430 netif_carrier_off(tp->dev);
9432 err = tg3_power_up(tp);
9433 if (err)
9434 return err;
9436 tg3_full_lock(tp, 0);
9438 tg3_disable_ints(tp);
9439 tg3_flag_clear(tp, INIT_COMPLETE);
9441 tg3_full_unlock(tp);
9443 /*
9444 * Set up interrupts first so we know how
9445 * many NAPI resources to allocate.
9446 */
9447 tg3_ints_init(tp);
9449 /* The placement of this call is tied
9450 * to the setup and use of Host TX descriptors.
9451 */
9452 err = tg3_alloc_consistent(tp);
9453 if (err)
9454 goto err_out1;
9456 tg3_napi_init(tp);
9458 tg3_napi_enable(tp);
9460 for (i = 0; i < tp->irq_cnt; i++) {
9461 struct tg3_napi *tnapi = &tp->napi[i];
9462 err = tg3_request_irq(tp, i);
9463 if (err) {
9464 for (i--; i >= 0; i--)
9465 free_irq(tnapi->irq_vec, tnapi);
9466 break;
9470 if (err)
9471 goto err_out2;
9473 tg3_full_lock(tp, 0);
9475 err = tg3_init_hw(tp, 1);
9476 if (err) {
9477 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9478 tg3_free_rings(tp);
9479 } else {
9480 if (tg3_flag(tp, TAGGED_STATUS))
9481 tp->timer_offset = HZ;
9482 else
9483 tp->timer_offset = HZ / 10;
9485 BUG_ON(tp->timer_offset > HZ);
9486 tp->timer_counter = tp->timer_multiplier =
9487 (HZ / tp->timer_offset);
9488 tp->asf_counter = tp->asf_multiplier =
9489 ((HZ / tp->timer_offset) * 2);
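/* Illustrative arithmetic: the timer fires every tp->timer_offset
 * jiffies (1s with tagged status, 100ms otherwise), so a multiplier of
 * HZ / timer_offset makes the once-per-second work in tg3_timer run on
 * every multiplier-th tick, and doubling it yields the 2-second ASF
 * heartbeat interval.
 */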
9491 init_timer(&tp->timer);
9492 tp->timer.expires = jiffies + tp->timer_offset;
9493 tp->timer.data = (unsigned long) tp;
9494 tp->timer.function = tg3_timer;
9497 tg3_full_unlock(tp);
9499 if (err)
9500 goto err_out3;
9502 if (tg3_flag(tp, USING_MSI)) {
9503 err = tg3_test_msi(tp);
9505 if (err) {
9506 tg3_full_lock(tp, 0);
9507 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9508 tg3_free_rings(tp);
9509 tg3_full_unlock(tp);
9511 goto err_out2;
9514 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9515 u32 val = tr32(PCIE_TRANSACTION_CFG);
9517 tw32(PCIE_TRANSACTION_CFG,
9518 val | PCIE_TRANS_CFG_1SHOT_MSI);
9522 tg3_phy_start(tp);
9524 tg3_full_lock(tp, 0);
9526 add_timer(&tp->timer);
9527 tg3_flag_set(tp, INIT_COMPLETE);
9528 tg3_enable_ints(tp);
9530 tg3_full_unlock(tp);
9532 netif_tx_start_all_queues(dev);
9534 /*
9535 * Reset the loopback feature if it was turned on while the device was
9536 * down; make sure that it is installed properly now.
9537 */
9538 if (dev->features & NETIF_F_LOOPBACK)
9539 tg3_set_loopback(dev, dev->features);
9541 return 0;
9543 err_out3:
9544 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9545 struct tg3_napi *tnapi = &tp->napi[i];
9546 free_irq(tnapi->irq_vec, tnapi);
9549 err_out2:
9550 tg3_napi_disable(tp);
9551 tg3_napi_fini(tp);
9552 tg3_free_consistent(tp);
9554 err_out1:
9555 tg3_ints_fini(tp);
9556 return err;
9559 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9560 struct rtnl_link_stats64 *);
9561 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9563 static int tg3_close(struct net_device *dev)
9565 int i;
9566 struct tg3 *tp = netdev_priv(dev);
9568 tg3_napi_disable(tp);
9569 cancel_work_sync(&tp->reset_task);
9571 netif_tx_stop_all_queues(dev);
9573 del_timer_sync(&tp->timer);
9575 tg3_phy_stop(tp);
9577 tg3_full_lock(tp, 1);
9579 tg3_disable_ints(tp);
9581 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9582 tg3_free_rings(tp);
9583 tg3_flag_clear(tp, INIT_COMPLETE);
9585 tg3_full_unlock(tp);
9587 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9588 struct tg3_napi *tnapi = &tp->napi[i];
9589 free_irq(tnapi->irq_vec, tnapi);
9592 tg3_ints_fini(tp);
9594 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9596 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9597 sizeof(tp->estats_prev));
9599 tg3_napi_fini(tp);
9601 tg3_free_consistent(tp);
9603 tg3_power_down(tp);
9605 netif_carrier_off(tp->dev);
9607 return 0;
9610 static inline u64 get_stat64(tg3_stat64_t *val)
9612 return ((u64)val->high << 32) | ((u64)val->low);
9615 static u64 calc_crc_errors(struct tg3 *tp)
9617 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9619 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9620 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9621 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9622 u32 val;
9624 spin_lock_bh(&tp->lock);
9625 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9626 tg3_writephy(tp, MII_TG3_TEST1,
9627 val | MII_TG3_TEST1_CRC_EN);
9628 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9629 } else
9630 val = 0;
9631 spin_unlock_bh(&tp->lock);
9633 tp->phy_crc_errors += val;
9635 return tp->phy_crc_errors;
9638 return get_stat64(&hw_stats->rx_fcs_errors);
9641 #define ESTAT_ADD(member) \
9642 estats->member = old_estats->member + \
9643 get_stat64(&hw_stats->member)
9645 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9647 struct tg3_ethtool_stats *estats = &tp->estats;
9648 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9649 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9651 if (!hw_stats)
9652 return old_estats;
9654 ESTAT_ADD(rx_octets);
9655 ESTAT_ADD(rx_fragments);
9656 ESTAT_ADD(rx_ucast_packets);
9657 ESTAT_ADD(rx_mcast_packets);
9658 ESTAT_ADD(rx_bcast_packets);
9659 ESTAT_ADD(rx_fcs_errors);
9660 ESTAT_ADD(rx_align_errors);
9661 ESTAT_ADD(rx_xon_pause_rcvd);
9662 ESTAT_ADD(rx_xoff_pause_rcvd);
9663 ESTAT_ADD(rx_mac_ctrl_rcvd);
9664 ESTAT_ADD(rx_xoff_entered);
9665 ESTAT_ADD(rx_frame_too_long_errors);
9666 ESTAT_ADD(rx_jabbers);
9667 ESTAT_ADD(rx_undersize_packets);
9668 ESTAT_ADD(rx_in_length_errors);
9669 ESTAT_ADD(rx_out_length_errors);
9670 ESTAT_ADD(rx_64_or_less_octet_packets);
9671 ESTAT_ADD(rx_65_to_127_octet_packets);
9672 ESTAT_ADD(rx_128_to_255_octet_packets);
9673 ESTAT_ADD(rx_256_to_511_octet_packets);
9674 ESTAT_ADD(rx_512_to_1023_octet_packets);
9675 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9676 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9677 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9678 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9679 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9681 ESTAT_ADD(tx_octets);
9682 ESTAT_ADD(tx_collisions);
9683 ESTAT_ADD(tx_xon_sent);
9684 ESTAT_ADD(tx_xoff_sent);
9685 ESTAT_ADD(tx_flow_control);
9686 ESTAT_ADD(tx_mac_errors);
9687 ESTAT_ADD(tx_single_collisions);
9688 ESTAT_ADD(tx_mult_collisions);
9689 ESTAT_ADD(tx_deferred);
9690 ESTAT_ADD(tx_excessive_collisions);
9691 ESTAT_ADD(tx_late_collisions);
9692 ESTAT_ADD(tx_collide_2times);
9693 ESTAT_ADD(tx_collide_3times);
9694 ESTAT_ADD(tx_collide_4times);
9695 ESTAT_ADD(tx_collide_5times);
9696 ESTAT_ADD(tx_collide_6times);
9697 ESTAT_ADD(tx_collide_7times);
9698 ESTAT_ADD(tx_collide_8times);
9699 ESTAT_ADD(tx_collide_9times);
9700 ESTAT_ADD(tx_collide_10times);
9701 ESTAT_ADD(tx_collide_11times);
9702 ESTAT_ADD(tx_collide_12times);
9703 ESTAT_ADD(tx_collide_13times);
9704 ESTAT_ADD(tx_collide_14times);
9705 ESTAT_ADD(tx_collide_15times);
9706 ESTAT_ADD(tx_ucast_packets);
9707 ESTAT_ADD(tx_mcast_packets);
9708 ESTAT_ADD(tx_bcast_packets);
9709 ESTAT_ADD(tx_carrier_sense_errors);
9710 ESTAT_ADD(tx_discards);
9711 ESTAT_ADD(tx_errors);
9713 ESTAT_ADD(dma_writeq_full);
9714 ESTAT_ADD(dma_write_prioq_full);
9715 ESTAT_ADD(rxbds_empty);
9716 ESTAT_ADD(rx_discards);
9717 ESTAT_ADD(rx_errors);
9718 ESTAT_ADD(rx_threshold_hit);
9720 ESTAT_ADD(dma_readq_full);
9721 ESTAT_ADD(dma_read_prioq_full);
9722 ESTAT_ADD(tx_comp_queue_full);
9724 ESTAT_ADD(ring_set_send_prod_index);
9725 ESTAT_ADD(ring_status_update);
9726 ESTAT_ADD(nic_irqs);
9727 ESTAT_ADD(nic_avoided_irqs);
9728 ESTAT_ADD(nic_tx_threshold_hit);
9730 return estats;
9733 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9734 struct rtnl_link_stats64 *stats)
9736 struct tg3 *tp = netdev_priv(dev);
9737 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9738 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9740 if (!hw_stats)
9741 return old_stats;
9743 stats->rx_packets = old_stats->rx_packets +
9744 get_stat64(&hw_stats->rx_ucast_packets) +
9745 get_stat64(&hw_stats->rx_mcast_packets) +
9746 get_stat64(&hw_stats->rx_bcast_packets);
9748 stats->tx_packets = old_stats->tx_packets +
9749 get_stat64(&hw_stats->tx_ucast_packets) +
9750 get_stat64(&hw_stats->tx_mcast_packets) +
9751 get_stat64(&hw_stats->tx_bcast_packets);
9753 stats->rx_bytes = old_stats->rx_bytes +
9754 get_stat64(&hw_stats->rx_octets);
9755 stats->tx_bytes = old_stats->tx_bytes +
9756 get_stat64(&hw_stats->tx_octets);
9758 stats->rx_errors = old_stats->rx_errors +
9759 get_stat64(&hw_stats->rx_errors);
9760 stats->tx_errors = old_stats->tx_errors +
9761 get_stat64(&hw_stats->tx_errors) +
9762 get_stat64(&hw_stats->tx_mac_errors) +
9763 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9764 get_stat64(&hw_stats->tx_discards);
9766 stats->multicast = old_stats->multicast +
9767 get_stat64(&hw_stats->rx_mcast_packets);
9768 stats->collisions = old_stats->collisions +
9769 get_stat64(&hw_stats->tx_collisions);
9771 stats->rx_length_errors = old_stats->rx_length_errors +
9772 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9773 get_stat64(&hw_stats->rx_undersize_packets);
9775 stats->rx_over_errors = old_stats->rx_over_errors +
9776 get_stat64(&hw_stats->rxbds_empty);
9777 stats->rx_frame_errors = old_stats->rx_frame_errors +
9778 get_stat64(&hw_stats->rx_align_errors);
9779 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9780 get_stat64(&hw_stats->tx_discards);
9781 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9782 get_stat64(&hw_stats->tx_carrier_sense_errors);
9784 stats->rx_crc_errors = old_stats->rx_crc_errors +
9785 calc_crc_errors(tp);
9787 stats->rx_missed_errors = old_stats->rx_missed_errors +
9788 get_stat64(&hw_stats->rx_discards);
9790 stats->rx_dropped = tp->rx_dropped;
9792 return stats;
9795 static inline u32 calc_crc(unsigned char *buf, int len)
9797 u32 reg;
9798 u32 tmp;
9799 int j, k;
9801 reg = 0xffffffff;
9803 for (j = 0; j < len; j++) {
9804 reg ^= buf[j];
9806 for (k = 0; k < 8; k++) {
9807 tmp = reg & 0x01;
9809 reg >>= 1;
9811 if (tmp)
9812 reg ^= 0xedb88320;
9816 return ~reg;
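/* Illustrative note: the loop above is the standard bit-reflected
 * CRC-32 used on Ethernet (0xedb88320 is the reflected form of the
 * polynomial 0x04c11db7); __tg3_set_rx_mode() below folds the result
 * into the 128-bit multicast hash filter.
 */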
9819 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9821 /* accept or reject all multicast frames */
9822 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9823 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9824 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9825 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9828 static void __tg3_set_rx_mode(struct net_device *dev)
9830 struct tg3 *tp = netdev_priv(dev);
9831 u32 rx_mode;
9833 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9834 RX_MODE_KEEP_VLAN_TAG);
9836 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9837 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9838 * flag clear.
9839 */
9840 if (!tg3_flag(tp, ENABLE_ASF))
9841 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9842 #endif
9844 if (dev->flags & IFF_PROMISC) {
9845 /* Promiscuous mode. */
9846 rx_mode |= RX_MODE_PROMISC;
9847 } else if (dev->flags & IFF_ALLMULTI) {
9848 /* Accept all multicast. */
9849 tg3_set_multi(tp, 1);
9850 } else if (netdev_mc_empty(dev)) {
9851 /* Reject all multicast. */
9852 tg3_set_multi(tp, 0);
9853 } else {
9854 /* Accept one or more multicast(s). */
9855 struct netdev_hw_addr *ha;
9856 u32 mc_filter[4] = { 0, };
9857 u32 regidx;
9858 u32 bit;
9859 u32 crc;
9861 netdev_for_each_mc_addr(ha, dev) {
9862 crc = calc_crc(ha->addr, ETH_ALEN);
9863 bit = ~crc & 0x7f;
9864 regidx = (bit & 0x60) >> 5;
9865 bit &= 0x1f;
9866 mc_filter[regidx] |= (1 << bit);
9869 tw32(MAC_HASH_REG_0, mc_filter[0]);
9870 tw32(MAC_HASH_REG_1, mc_filter[1]);
9871 tw32(MAC_HASH_REG_2, mc_filter[2]);
9872 tw32(MAC_HASH_REG_3, mc_filter[3]);
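/* Worked example: if calc_crc() returned 0xffffff00, then
 * bit = ~crc & 0x7f = 0x7f, regidx = (0x7f & 0x60) >> 5 = 3, and
 * bit &= 0x1f leaves 31, so bit 31 of MAC_HASH_REG_3 gets set.
 */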
9875 if (rx_mode != tp->rx_mode) {
9876 tp->rx_mode = rx_mode;
9877 tw32_f(MAC_RX_MODE, rx_mode);
9878 udelay(10);
9882 static void tg3_set_rx_mode(struct net_device *dev)
9884 struct tg3 *tp = netdev_priv(dev);
9886 if (!netif_running(dev))
9887 return;
9889 tg3_full_lock(tp, 0);
9890 __tg3_set_rx_mode(dev);
9891 tg3_full_unlock(tp);
9894 static int tg3_get_regs_len(struct net_device *dev)
9896 return TG3_REG_BLK_SIZE;
9899 static void tg3_get_regs(struct net_device *dev,
9900 struct ethtool_regs *regs, void *_p)
9902 struct tg3 *tp = netdev_priv(dev);
9904 regs->version = 0;
9906 memset(_p, 0, TG3_REG_BLK_SIZE);
9908 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9909 return;
9911 tg3_full_lock(tp, 0);
9913 tg3_dump_legacy_regs(tp, (u32 *)_p);
9915 tg3_full_unlock(tp);
9918 static int tg3_get_eeprom_len(struct net_device *dev)
9920 struct tg3 *tp = netdev_priv(dev);
9922 return tp->nvram_size;
9925 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9927 struct tg3 *tp = netdev_priv(dev);
9928 int ret;
9929 u8 *pd;
9930 u32 i, offset, len, b_offset, b_count;
9931 __be32 val;
9933 if (tg3_flag(tp, NO_NVRAM))
9934 return -EINVAL;
9936 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9937 return -EAGAIN;
9939 offset = eeprom->offset;
9940 len = eeprom->len;
9941 eeprom->len = 0;
9943 eeprom->magic = TG3_EEPROM_MAGIC;
9945 if (offset & 3) {
9946 /* adjustments to start on required 4 byte boundary */
9947 b_offset = offset & 3;
9948 b_count = 4 - b_offset;
9949 if (b_count > len) {
9950 /* i.e. offset=1 len=2 */
9951 b_count = len;
9953 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9954 if (ret)
9955 return ret;
9956 memcpy(data, ((char *)&val) + b_offset, b_count);
9957 len -= b_count;
9958 offset += b_count;
9959 eeprom->len += b_count;
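/* Illustrative summary: the read is split into an unaligned head
 * (above), whole 4-byte words (below), and an unaligned tail, because
 * tg3_nvram_read_be32() can only fetch aligned 32-bit words.
 */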
9962 /* read bytes up to the last 4 byte boundary */
9963 pd = &data[eeprom->len];
9964 for (i = 0; i < (len - (len & 3)); i += 4) {
9965 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9966 if (ret) {
9967 eeprom->len += i;
9968 return ret;
9970 memcpy(pd + i, &val, 4);
9972 eeprom->len += i;
9974 if (len & 3) {
9975 /* read last bytes not ending on 4 byte boundary */
9976 pd = &data[eeprom->len];
9977 b_count = len & 3;
9978 b_offset = offset + len - b_count;
9979 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9980 if (ret)
9981 return ret;
9982 memcpy(pd, &val, b_count);
9983 eeprom->len += b_count;
9985 return 0;
9988 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9990 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9992 struct tg3 *tp = netdev_priv(dev);
9993 int ret;
9994 u32 offset, len, b_offset, odd_len;
9995 u8 *buf;
9996 __be32 start, end;
9998 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9999 return -EAGAIN;
10001 if (tg3_flag(tp, NO_NVRAM) ||
10002 eeprom->magic != TG3_EEPROM_MAGIC)
10003 return -EINVAL;
10005 offset = eeprom->offset;
10006 len = eeprom->len;
10008 if ((b_offset = (offset & 3))) {
10009 /* adjustments to start on required 4 byte boundary */
10010 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10011 if (ret)
10012 return ret;
10013 len += b_offset;
10014 offset &= ~3;
10015 if (len < 4)
10016 len = 4;
10019 odd_len = 0;
10020 if (len & 3) {
10021 /* adjustments to end on required 4 byte boundary */
10022 odd_len = 1;
10023 len = (len + 3) & ~3;
10024 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10025 if (ret)
10026 return ret;
10029 buf = data;
10030 if (b_offset || odd_len) {
10031 buf = kmalloc(len, GFP_KERNEL);
10032 if (!buf)
10033 return -ENOMEM;
10034 if (b_offset)
10035 memcpy(buf, &start, 4);
10036 if (odd_len)
10037 memcpy(buf+len-4, &end, 4);
10038 memcpy(buf + b_offset, data, eeprom->len);
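/* Illustrative note: this is a read-modify-write -- the partial words
 * at either end ("start" and "end", fetched above) preserve the NVRAM
 * bytes that fall outside the caller's range.
 */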
10041 ret = tg3_nvram_write_block(tp, offset, len, buf);
10043 if (buf != data)
10044 kfree(buf);
10046 return ret;
10049 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10051 struct tg3 *tp = netdev_priv(dev);
10053 if (tg3_flag(tp, USE_PHYLIB)) {
10054 struct phy_device *phydev;
10055 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10056 return -EAGAIN;
10057 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10058 return phy_ethtool_gset(phydev, cmd);
10061 cmd->supported = (SUPPORTED_Autoneg);
10063 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10064 cmd->supported |= (SUPPORTED_1000baseT_Half |
10065 SUPPORTED_1000baseT_Full);
10067 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10068 cmd->supported |= (SUPPORTED_100baseT_Half |
10069 SUPPORTED_100baseT_Full |
10070 SUPPORTED_10baseT_Half |
10071 SUPPORTED_10baseT_Full |
10072 SUPPORTED_TP);
10073 cmd->port = PORT_TP;
10074 } else {
10075 cmd->supported |= SUPPORTED_FIBRE;
10076 cmd->port = PORT_FIBRE;
10079 cmd->advertising = tp->link_config.advertising;
10080 if (netif_running(dev)) {
10081 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10082 cmd->duplex = tp->link_config.active_duplex;
10083 } else {
10084 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10085 cmd->duplex = DUPLEX_INVALID;
10087 cmd->phy_address = tp->phy_addr;
10088 cmd->transceiver = XCVR_INTERNAL;
10089 cmd->autoneg = tp->link_config.autoneg;
10090 cmd->maxtxpkt = 0;
10091 cmd->maxrxpkt = 0;
10092 return 0;
10095 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10097 struct tg3 *tp = netdev_priv(dev);
10098 u32 speed = ethtool_cmd_speed(cmd);
10100 if (tg3_flag(tp, USE_PHYLIB)) {
10101 struct phy_device *phydev;
10102 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10103 return -EAGAIN;
10104 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10105 return phy_ethtool_sset(phydev, cmd);
10108 if (cmd->autoneg != AUTONEG_ENABLE &&
10109 cmd->autoneg != AUTONEG_DISABLE)
10110 return -EINVAL;
10112 if (cmd->autoneg == AUTONEG_DISABLE &&
10113 cmd->duplex != DUPLEX_FULL &&
10114 cmd->duplex != DUPLEX_HALF)
10115 return -EINVAL;
10117 if (cmd->autoneg == AUTONEG_ENABLE) {
10118 u32 mask = ADVERTISED_Autoneg |
10119 ADVERTISED_Pause |
10120 ADVERTISED_Asym_Pause;
10122 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10123 mask |= ADVERTISED_1000baseT_Half |
10124 ADVERTISED_1000baseT_Full;
10126 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10127 mask |= ADVERTISED_100baseT_Half |
10128 ADVERTISED_100baseT_Full |
10129 ADVERTISED_10baseT_Half |
10130 ADVERTISED_10baseT_Full |
10131 ADVERTISED_TP;
10132 else
10133 mask |= ADVERTISED_FIBRE;
10135 if (cmd->advertising & ~mask)
10136 return -EINVAL;
10138 mask &= (ADVERTISED_1000baseT_Half |
10139 ADVERTISED_1000baseT_Full |
10140 ADVERTISED_100baseT_Half |
10141 ADVERTISED_100baseT_Full |
10142 ADVERTISED_10baseT_Half |
10143 ADVERTISED_10baseT_Full);
10145 cmd->advertising &= mask;
10146 } else {
10147 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10148 if (speed != SPEED_1000)
10149 return -EINVAL;
10151 if (cmd->duplex != DUPLEX_FULL)
10152 return -EINVAL;
10153 } else {
10154 if (speed != SPEED_100 &&
10155 speed != SPEED_10)
10156 return -EINVAL;
10160 tg3_full_lock(tp, 0);
10162 tp->link_config.autoneg = cmd->autoneg;
10163 if (cmd->autoneg == AUTONEG_ENABLE) {
10164 tp->link_config.advertising = (cmd->advertising |
10165 ADVERTISED_Autoneg);
10166 tp->link_config.speed = SPEED_INVALID;
10167 tp->link_config.duplex = DUPLEX_INVALID;
10168 } else {
10169 tp->link_config.advertising = 0;
10170 tp->link_config.speed = speed;
10171 tp->link_config.duplex = cmd->duplex;
10174 tp->link_config.orig_speed = tp->link_config.speed;
10175 tp->link_config.orig_duplex = tp->link_config.duplex;
10176 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10178 if (netif_running(dev))
10179 tg3_setup_phy(tp, 1);
10181 tg3_full_unlock(tp);
10183 return 0;
10186 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10188 struct tg3 *tp = netdev_priv(dev);
10190 strcpy(info->driver, DRV_MODULE_NAME);
10191 strcpy(info->version, DRV_MODULE_VERSION);
10192 strcpy(info->fw_version, tp->fw_ver);
10193 strcpy(info->bus_info, pci_name(tp->pdev));
10196 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10198 struct tg3 *tp = netdev_priv(dev);
10200 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10201 wol->supported = WAKE_MAGIC;
10202 else
10203 wol->supported = 0;
10204 wol->wolopts = 0;
10205 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10206 wol->wolopts = WAKE_MAGIC;
10207 memset(&wol->sopass, 0, sizeof(wol->sopass));
10208 }
10210 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10211 {
10212 struct tg3 *tp = netdev_priv(dev);
10213 struct device *dp = &tp->pdev->dev;
10215 if (wol->wolopts & ~WAKE_MAGIC)
10216 return -EINVAL;
10217 if ((wol->wolopts & WAKE_MAGIC) &&
10218 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10219 return -EINVAL;
10221 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10223 spin_lock_bh(&tp->lock);
10224 if (device_may_wakeup(dp))
10225 tg3_flag_set(tp, WOL_ENABLE);
10226 else
10227 tg3_flag_clear(tp, WOL_ENABLE);
10228 spin_unlock_bh(&tp->lock);
10230 return 0;
10231 }
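/* Editor's illustrative sketch, not part of the driver: enabling
 * magic-packet wake-up from user space, which lands in tg3_set_wol()
 * above.  That handler rejects everything except WAKE_MAGIC, so it is
 * the only wolopts bit worth setting here.  The interface name and the
 * caller-supplied socket fd are assumptions of this sketch.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int enable_wol_magic(int fd, const char *ifname)
{
	struct ifreq ifr;
	struct ethtool_wolinfo wol;

	memset(&ifr, 0, sizeof(ifr));
	memset(&wol, 0, sizeof(wol));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	wol.cmd = ETHTOOL_SWOL;
	wol.wolopts = WAKE_MAGIC;	/* anything else returns -EINVAL */
	ifr.ifr_data = (void *)&wol;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}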
10233 static u32 tg3_get_msglevel(struct net_device *dev)
10234 {
10235 struct tg3 *tp = netdev_priv(dev);
10236 return tp->msg_enable;
10237 }
10239 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10240 {
10241 struct tg3 *tp = netdev_priv(dev);
10242 tp->msg_enable = value;
10243 }
10245 static int tg3_nway_reset(struct net_device *dev)
10246 {
10247 struct tg3 *tp = netdev_priv(dev);
10248 int r;
10250 if (!netif_running(dev))
10251 return -EAGAIN;
10253 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10254 return -EINVAL;
10256 if (tg3_flag(tp, USE_PHYLIB)) {
10257 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10258 return -EAGAIN;
10259 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10260 } else {
10261 u32 bmcr;
10263 spin_lock_bh(&tp->lock);
10264 r = -EINVAL;
10265 tg3_readphy(tp, MII_BMCR, &bmcr);
10266 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10267 ((bmcr & BMCR_ANENABLE) ||
10268 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10269 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10270 BMCR_ANENABLE);
10271 r = 0;
10272 }
10273 spin_unlock_bh(&tp->lock);
10274 }
10276 return r;
10277 }
10279 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10280 {
10281 struct tg3 *tp = netdev_priv(dev);
10283 ering->rx_max_pending = tp->rx_std_ring_mask;
10284 ering->rx_mini_max_pending = 0;
10285 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10286 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10287 else
10288 ering->rx_jumbo_max_pending = 0;
10290 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10292 ering->rx_pending = tp->rx_pending;
10293 ering->rx_mini_pending = 0;
10294 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10295 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10296 else
10297 ering->rx_jumbo_pending = 0;
10299 ering->tx_pending = tp->napi[0].tx_pending;
10300 }
10302 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10303 {
10304 struct tg3 *tp = netdev_priv(dev);
10305 int i, irq_sync = 0, err = 0;
10307 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10308 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10309 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10310 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10311 (tg3_flag(tp, TSO_BUG) &&
10312 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10313 return -EINVAL;
10315 if (netif_running(dev)) {
10316 tg3_phy_stop(tp);
10317 tg3_netif_stop(tp);
10318 irq_sync = 1;
10319 }
10321 tg3_full_lock(tp, irq_sync);
10323 tp->rx_pending = ering->rx_pending;
10325 if (tg3_flag(tp, MAX_RXPEND_64) &&
10326 tp->rx_pending > 63)
10327 tp->rx_pending = 63;
10328 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10330 for (i = 0; i < tp->irq_max; i++)
10331 tp->napi[i].tx_pending = ering->tx_pending;
10333 if (netif_running(dev)) {
10334 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10335 err = tg3_restart_hw(tp, 1);
10336 if (!err)
10337 tg3_netif_start(tp);
10338 }
10340 tg3_full_unlock(tp);
10342 if (irq_sync && !err)
10343 tg3_phy_start(tp);
10345 return err;
10346 }
10348 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10349 {
10350 struct tg3 *tp = netdev_priv(dev);
10352 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10354 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10355 epause->rx_pause = 1;
10356 else
10357 epause->rx_pause = 0;
10359 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10360 epause->tx_pause = 1;
10361 else
10362 epause->tx_pause = 0;
10363 }
10365 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10366 {
10367 struct tg3 *tp = netdev_priv(dev);
10368 int err = 0;
10370 if (tg3_flag(tp, USE_PHYLIB)) {
10371 u32 newadv;
10372 struct phy_device *phydev;
10374 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10376 if (!(phydev->supported & SUPPORTED_Pause) ||
10377 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10378 (epause->rx_pause != epause->tx_pause)))
10379 return -EINVAL;
10381 tp->link_config.flowctrl = 0;
10382 if (epause->rx_pause) {
10383 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10385 if (epause->tx_pause) {
10386 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10387 newadv = ADVERTISED_Pause;
10388 } else
10389 newadv = ADVERTISED_Pause |
10390 ADVERTISED_Asym_Pause;
10391 } else if (epause->tx_pause) {
10392 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10393 newadv = ADVERTISED_Asym_Pause;
10394 } else
10395 newadv = 0;
10397 if (epause->autoneg)
10398 tg3_flag_set(tp, PAUSE_AUTONEG);
10399 else
10400 tg3_flag_clear(tp, PAUSE_AUTONEG);
10402 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10403 u32 oldadv = phydev->advertising &
10404 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10405 if (oldadv != newadv) {
10406 phydev->advertising &=
10407 ~(ADVERTISED_Pause |
10408 ADVERTISED_Asym_Pause);
10409 phydev->advertising |= newadv;
10410 if (phydev->autoneg) {
10411 /*
10412 * Always renegotiate the link to
10413 * inform our link partner of our
10414 * flow control settings, even if the
10415 * flow control is forced. Let
10416 * tg3_adjust_link() do the final
10417 * flow control setup.
10418 */
10419 return phy_start_aneg(phydev);
10420 }
10421 }
10423 if (!epause->autoneg)
10424 tg3_setup_flow_control(tp, 0, 0);
10425 } else {
10426 tp->link_config.orig_advertising &=
10427 ~(ADVERTISED_Pause |
10428 ADVERTISED_Asym_Pause);
10429 tp->link_config.orig_advertising |= newadv;
10430 }
10431 } else {
10432 int irq_sync = 0;
10434 if (netif_running(dev)) {
10435 tg3_netif_stop(tp);
10436 irq_sync = 1;
10437 }
10439 tg3_full_lock(tp, irq_sync);
10441 if (epause->autoneg)
10442 tg3_flag_set(tp, PAUSE_AUTONEG);
10443 else
10444 tg3_flag_clear(tp, PAUSE_AUTONEG);
10445 if (epause->rx_pause)
10446 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10447 else
10448 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10449 if (epause->tx_pause)
10450 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10451 else
10452 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10454 if (netif_running(dev)) {
10455 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10456 err = tg3_restart_hw(tp, 1);
10457 if (!err)
10458 tg3_netif_start(tp);
10459 }
10461 tg3_full_unlock(tp);
10462 }
10464 return err;
10465 }
10467 static int tg3_get_sset_count(struct net_device *dev, int sset)
10468 {
10469 switch (sset) {
10470 case ETH_SS_TEST:
10471 return TG3_NUM_TEST;
10472 case ETH_SS_STATS:
10473 return TG3_NUM_STATS;
10474 default:
10475 return -EOPNOTSUPP;
10476 }
10477 }
10479 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10480 {
10481 switch (stringset) {
10482 case ETH_SS_STATS:
10483 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10484 break;
10485 case ETH_SS_TEST:
10486 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10487 break;
10488 default:
10489 WARN_ON(1); /* we need a WARN() */
10490 break;
10491 }
10492 }
10494 static int tg3_set_phys_id(struct net_device *dev,
10495 enum ethtool_phys_id_state state)
10496 {
10497 struct tg3 *tp = netdev_priv(dev);
10499 if (!netif_running(tp->dev))
10500 return -EAGAIN;
10502 switch (state) {
10503 case ETHTOOL_ID_ACTIVE:
10504 return 1; /* cycle on/off once per second */
10506 case ETHTOOL_ID_ON:
10507 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10508 LED_CTRL_1000MBPS_ON |
10509 LED_CTRL_100MBPS_ON |
10510 LED_CTRL_10MBPS_ON |
10511 LED_CTRL_TRAFFIC_OVERRIDE |
10512 LED_CTRL_TRAFFIC_BLINK |
10513 LED_CTRL_TRAFFIC_LED);
10514 break;
10516 case ETHTOOL_ID_OFF:
10517 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10518 LED_CTRL_TRAFFIC_OVERRIDE);
10519 break;
10521 case ETHTOOL_ID_INACTIVE:
10522 tw32(MAC_LED_CTRL, tp->led_ctrl);
10523 break;
10524 }
10526 return 0;
10527 }
10529 static void tg3_get_ethtool_stats(struct net_device *dev,
10530 struct ethtool_stats *estats, u64 *tmp_stats)
10531 {
10532 struct tg3 *tp = netdev_priv(dev);
10533 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10534 }
10536 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10537 {
10538 int i;
10539 __be32 *buf;
10540 u32 offset = 0, len = 0;
10541 u32 magic, val;
10543 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10544 return NULL;
10546 if (magic == TG3_EEPROM_MAGIC) {
10547 for (offset = TG3_NVM_DIR_START;
10548 offset < TG3_NVM_DIR_END;
10549 offset += TG3_NVM_DIRENT_SIZE) {
10550 if (tg3_nvram_read(tp, offset, &val))
10551 return NULL;
10553 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10554 TG3_NVM_DIRTYPE_EXTVPD)
10555 break;
10556 }
10558 if (offset != TG3_NVM_DIR_END) {
10559 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10560 if (tg3_nvram_read(tp, offset + 4, &offset))
10561 return NULL;
10563 offset = tg3_nvram_logical_addr(tp, offset);
10564 }
10565 }
10567 if (!offset || !len) {
10568 offset = TG3_NVM_VPD_OFF;
10569 len = TG3_NVM_VPD_LEN;
10570 }
10572 buf = kmalloc(len, GFP_KERNEL);
10573 if (buf == NULL)
10574 return NULL;
10576 if (magic == TG3_EEPROM_MAGIC) {
10577 for (i = 0; i < len; i += 4) {
10578 /* The data is in little-endian format in NVRAM.
10579 * Use the big-endian read routines to preserve
10580 * the byte order as it exists in NVRAM.
10581 */
10582 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10583 goto error;
10584 }
10585 } else {
10586 u8 *ptr;
10587 ssize_t cnt;
10588 unsigned int pos = 0;
10590 ptr = (u8 *)&buf[0];
10591 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10592 cnt = pci_read_vpd(tp->pdev, pos,
10593 len - pos, ptr);
10594 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10595 cnt = 0;
10596 else if (cnt < 0)
10597 goto error;
10598 }
10599 if (pos != len)
10600 goto error;
10601 }
10603 return buf;
10605 error:
10606 kfree(buf);
10607 return NULL;
10608 }
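/* Editor's illustrative sketch, not part of the driver: why the VPD
 * loop above uses the big-endian NVRAM accessor.  A big-endian load
 * puts the byte at the lowest address into the most significant bits,
 * so storing the result back MSB-first reproduces the NVRAM bytes in
 * their original order on any host.  Plain C stand-ins for the
 * tg3_nvram_read_be32() semantics:
 */
#include <stdint.h>

static uint32_t load_be32(const uint8_t *p)
{
	/* MSB first: p[0] lands in bits 31..24, p[3] in bits 7..0. */
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static void store_be32(uint8_t *p, uint32_t v)
{
	/* Writing MSB first restores the exact input byte order. */
	p[0] = (uint8_t)(v >> 24);
	p[1] = (uint8_t)(v >> 16);
	p[2] = (uint8_t)(v >> 8);
	p[3] = (uint8_t)v;
}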
10610 #define NVRAM_TEST_SIZE 0x100
10611 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10612 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10613 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10614 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10615 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10617 static int tg3_test_nvram(struct tg3 *tp)
10618 {
10619 u32 csum, magic;
10620 __be32 *buf;
10621 int i, j, k, err = 0, size;
10623 if (tg3_flag(tp, NO_NVRAM))
10624 return 0;
10626 if (tg3_nvram_read(tp, 0, &magic) != 0)
10627 return -EIO;
10629 if (magic == TG3_EEPROM_MAGIC)
10630 size = NVRAM_TEST_SIZE;
10631 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10632 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10633 TG3_EEPROM_SB_FORMAT_1) {
10634 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10635 case TG3_EEPROM_SB_REVISION_0:
10636 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10637 break;
10638 case TG3_EEPROM_SB_REVISION_2:
10639 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10640 break;
10641 case TG3_EEPROM_SB_REVISION_3:
10642 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10643 break;
10644 default:
10645 return 0;
10646 }
10647 } else
10648 return 0;
10649 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10650 size = NVRAM_SELFBOOT_HW_SIZE;
10651 else
10652 return -EIO;
10654 buf = kmalloc(size, GFP_KERNEL);
10655 if (buf == NULL)
10656 return -ENOMEM;
10658 err = -EIO;
10659 for (i = 0, j = 0; i < size; i += 4, j++) {
10660 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10661 if (err)
10662 break;
10663 }
10664 if (i < size)
10665 goto out;
10667 /* Selfboot format */
10668 magic = be32_to_cpu(buf[0]);
10669 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10670 TG3_EEPROM_MAGIC_FW) {
10671 u8 *buf8 = (u8 *) buf, csum8 = 0;
10673 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10674 TG3_EEPROM_SB_REVISION_2) {
10675 /* For rev 2, the csum doesn't include the MBA. */
10676 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10677 csum8 += buf8[i];
10678 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10679 csum8 += buf8[i];
10680 } else {
10681 for (i = 0; i < size; i++)
10682 csum8 += buf8[i];
10683 }
10685 if (csum8 == 0) {
10686 err = 0;
10687 goto out;
10688 }
10690 err = -EIO;
10691 goto out;
10692 }
10694 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10695 TG3_EEPROM_MAGIC_HW) {
10696 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10697 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10698 u8 *buf8 = (u8 *) buf;
10700 /* Separate the parity bits and the data bytes. */
10701 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10702 if ((i == 0) || (i == 8)) {
10703 int l;
10704 u8 msk;
10706 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10707 parity[k++] = buf8[i] & msk;
10708 i++;
10709 } else if (i == 16) {
10710 int l;
10711 u8 msk;
10713 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10714 parity[k++] = buf8[i] & msk;
10715 i++;
10717 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10718 parity[k++] = buf8[i] & msk;
10719 i++;
10720 }
10721 data[j++] = buf8[i];
10722 }
10724 err = -EIO;
10725 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10726 u8 hw8 = hweight8(data[i]);
10728 if ((hw8 & 0x1) && parity[i])
10729 goto out;
10730 else if (!(hw8 & 0x1) && !parity[i])
10731 goto out;
10732 }
10733 err = 0;
10734 goto out;
10735 }
10737 err = -EIO;
10739 /* Bootstrap checksum at offset 0x10 */
10740 csum = calc_crc((unsigned char *) buf, 0x10);
10741 if (csum != le32_to_cpu(buf[0x10/4]))
10742 goto out;
10744 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10745 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10746 if (csum != le32_to_cpu(buf[0xfc/4]))
10747 goto out;
10749 kfree(buf);
10751 buf = tg3_vpd_readblock(tp);
10752 if (!buf)
10753 return -ENOMEM;
10755 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10756 PCI_VPD_LRDT_RO_DATA);
10757 if (i > 0) {
10758 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10759 if (j < 0)
10760 goto out;
10762 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10763 goto out;
10765 i += PCI_VPD_LRDT_TAG_SIZE;
10766 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10767 PCI_VPD_RO_KEYWORD_CHKSUM);
10768 if (j > 0) {
10769 u8 csum8 = 0;
10771 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10773 for (i = 0; i <= j; i++)
10774 csum8 += ((u8 *)buf)[i];
10776 if (csum8)
10777 goto out;
10778 }
10779 }
10781 err = 0;
10783 out:
10784 kfree(buf);
10785 return err;
10786 }
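/* Editor's illustrative sketch, not part of the driver: the odd-parity
 * rule the hardware-selfboot branch of tg3_test_nvram() enforces above.
 * Each data byte must come out odd overall, i.e. the stored parity bit
 * is set exactly when the byte's population count is even.  The kernel's
 * hweight8() is the popcount below.
 */
#include <stdint.h>

static int selfboot_parity_ok(uint8_t data, uint8_t parity_bit)
{
	int ones = __builtin_popcount(data);

	/* Odd popcount -> parity bit must be clear; even -> must be set,
	 * mirroring the two goto-out failure cases in the loop above.
	 */
	return (ones & 1) ? !parity_bit : !!parity_bit;
}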
10788 #define TG3_SERDES_TIMEOUT_SEC 2
10789 #define TG3_COPPER_TIMEOUT_SEC 6
10791 static int tg3_test_link(struct tg3 *tp)
10792 {
10793 int i, max;
10795 if (!netif_running(tp->dev))
10796 return -ENODEV;
10798 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10799 max = TG3_SERDES_TIMEOUT_SEC;
10800 else
10801 max = TG3_COPPER_TIMEOUT_SEC;
10803 for (i = 0; i < max; i++) {
10804 if (netif_carrier_ok(tp->dev))
10805 return 0;
10807 if (msleep_interruptible(1000))
10808 break;
10809 }
10811 return -EIO;
10812 }
10814 /* Only test the commonly used registers */
10815 static int tg3_test_registers(struct tg3 *tp)
10816 {
10817 int i, is_5705, is_5750;
10818 u32 offset, read_mask, write_mask, val, save_val, read_val;
10819 static struct {
10820 u16 offset;
10821 u16 flags;
10822 #define TG3_FL_5705 0x1
10823 #define TG3_FL_NOT_5705 0x2
10824 #define TG3_FL_NOT_5788 0x4
10825 #define TG3_FL_NOT_5750 0x8
10826 u32 read_mask;
10827 u32 write_mask;
10828 } reg_tbl[] = {
10829 /* MAC Control Registers */
10830 { MAC_MODE, TG3_FL_NOT_5705,
10831 0x00000000, 0x00ef6f8c },
10832 { MAC_MODE, TG3_FL_5705,
10833 0x00000000, 0x01ef6b8c },
10834 { MAC_STATUS, TG3_FL_NOT_5705,
10835 0x03800107, 0x00000000 },
10836 { MAC_STATUS, TG3_FL_5705,
10837 0x03800100, 0x00000000 },
10838 { MAC_ADDR_0_HIGH, 0x0000,
10839 0x00000000, 0x0000ffff },
10840 { MAC_ADDR_0_LOW, 0x0000,
10841 0x00000000, 0xffffffff },
10842 { MAC_RX_MTU_SIZE, 0x0000,
10843 0x00000000, 0x0000ffff },
10844 { MAC_TX_MODE, 0x0000,
10845 0x00000000, 0x00000070 },
10846 { MAC_TX_LENGTHS, 0x0000,
10847 0x00000000, 0x00003fff },
10848 { MAC_RX_MODE, TG3_FL_NOT_5705,
10849 0x00000000, 0x000007fc },
10850 { MAC_RX_MODE, TG3_FL_5705,
10851 0x00000000, 0x000007dc },
10852 { MAC_HASH_REG_0, 0x0000,
10853 0x00000000, 0xffffffff },
10854 { MAC_HASH_REG_1, 0x0000,
10855 0x00000000, 0xffffffff },
10856 { MAC_HASH_REG_2, 0x0000,
10857 0x00000000, 0xffffffff },
10858 { MAC_HASH_REG_3, 0x0000,
10859 0x00000000, 0xffffffff },
10861 /* Receive Data and Receive BD Initiator Control Registers. */
10862 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10863 0x00000000, 0xffffffff },
10864 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10865 0x00000000, 0xffffffff },
10866 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10867 0x00000000, 0x00000003 },
10868 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10869 0x00000000, 0xffffffff },
10870 { RCVDBDI_STD_BD+0, 0x0000,
10871 0x00000000, 0xffffffff },
10872 { RCVDBDI_STD_BD+4, 0x0000,
10873 0x00000000, 0xffffffff },
10874 { RCVDBDI_STD_BD+8, 0x0000,
10875 0x00000000, 0xffff0002 },
10876 { RCVDBDI_STD_BD+0xc, 0x0000,
10877 0x00000000, 0xffffffff },
10879 /* Receive BD Initiator Control Registers. */
10880 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10881 0x00000000, 0xffffffff },
10882 { RCVBDI_STD_THRESH, TG3_FL_5705,
10883 0x00000000, 0x000003ff },
10884 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10885 0x00000000, 0xffffffff },
10887 /* Host Coalescing Control Registers. */
10888 { HOSTCC_MODE, TG3_FL_NOT_5705,
10889 0x00000000, 0x00000004 },
10890 { HOSTCC_MODE, TG3_FL_5705,
10891 0x00000000, 0x000000f6 },
10892 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10893 0x00000000, 0xffffffff },
10894 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10895 0x00000000, 0x000003ff },
10896 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10897 0x00000000, 0xffffffff },
10898 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10899 0x00000000, 0x000003ff },
10900 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10901 0x00000000, 0xffffffff },
10902 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10903 0x00000000, 0x000000ff },
10904 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10905 0x00000000, 0xffffffff },
10906 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10907 0x00000000, 0x000000ff },
10908 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10909 0x00000000, 0xffffffff },
10910 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10911 0x00000000, 0xffffffff },
10912 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10913 0x00000000, 0xffffffff },
10914 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10915 0x00000000, 0x000000ff },
10916 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10917 0x00000000, 0xffffffff },
10918 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10919 0x00000000, 0x000000ff },
10920 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10921 0x00000000, 0xffffffff },
10922 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10923 0x00000000, 0xffffffff },
10924 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10925 0x00000000, 0xffffffff },
10926 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10927 0x00000000, 0xffffffff },
10928 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10929 0x00000000, 0xffffffff },
10930 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10931 0xffffffff, 0x00000000 },
10932 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10933 0xffffffff, 0x00000000 },
10935 /* Buffer Manager Control Registers. */
10936 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10937 0x00000000, 0x007fff80 },
10938 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10939 0x00000000, 0x007fffff },
10940 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10941 0x00000000, 0x0000003f },
10942 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10943 0x00000000, 0x000001ff },
10944 { BUFMGR_MB_HIGH_WATER, 0x0000,
10945 0x00000000, 0x000001ff },
10946 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10947 0xffffffff, 0x00000000 },
10948 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10949 0xffffffff, 0x00000000 },
10951 /* Mailbox Registers */
10952 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10953 0x00000000, 0x000001ff },
10954 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10955 0x00000000, 0x000001ff },
10956 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10957 0x00000000, 0x000007ff },
10958 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10959 0x00000000, 0x000001ff },
10961 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10962 };
10964 is_5705 = is_5750 = 0;
10965 if (tg3_flag(tp, 5705_PLUS)) {
10966 is_5705 = 1;
10967 if (tg3_flag(tp, 5750_PLUS))
10968 is_5750 = 1;
10969 }
10971 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10972 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10973 continue;
10975 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10976 continue;
10978 if (tg3_flag(tp, IS_5788) &&
10979 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10980 continue;
10982 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10983 continue;
10985 offset = (u32) reg_tbl[i].offset;
10986 read_mask = reg_tbl[i].read_mask;
10987 write_mask = reg_tbl[i].write_mask;
10989 /* Save the original register content */
10990 save_val = tr32(offset);
10992 /* Determine the read-only value. */
10993 read_val = save_val & read_mask;
10995 /* Write zero to the register, then make sure the read-only bits
10996 * are not changed and the read/write bits are all zeros.
10997 */
10998 tw32(offset, 0);
11000 val = tr32(offset);
11002 /* Test the read-only and read/write bits. */
11003 if (((val & read_mask) != read_val) || (val & write_mask))
11004 goto out;
11006 /* Write ones to all the bits defined by RdMask and WrMask, then
11007 * make sure the read-only bits are not changed and the
11008 * read/write bits are all ones.
11009 */
11010 tw32(offset, read_mask | write_mask);
11012 val = tr32(offset);
11014 /* Test the read-only bits. */
11015 if ((val & read_mask) != read_val)
11016 goto out;
11018 /* Test the read/write bits. */
11019 if ((val & write_mask) != write_mask)
11020 goto out;
11022 tw32(offset, save_val);
11023 }
11025 return 0;
11027 out:
11028 if (netif_msg_hw(tp))
11029 netdev_err(tp->dev,
11030 "Register test failed at offset %x\n", offset);
11031 tw32(offset, save_val);
11032 return -EIO;
11033 }
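/* Editor's illustrative sketch, not part of the driver: the
 * read_mask/write_mask probe the loop above performs, reduced to plain
 * C.  reg_read()/reg_write() are hypothetical accessors standing in for
 * tr32()/tw32(); a plain memory cell cannot model true read-only bits,
 * so on real hardware the RO bits simply do not change across either
 * pass.  Shown only to make the two-pass procedure explicit.
 */
#include <stdint.h>

static uint32_t fake_reg;
static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

static int masked_reg_test(uint32_t read_mask, uint32_t write_mask)
{
	uint32_t save = reg_read();
	uint32_t ro = save & read_mask;	/* expected read-only value */
	uint32_t val;
	int ok = 1;

	/* Zero pass: RO bits must be unchanged, RW bits all clear. */
	reg_write(0);
	val = reg_read();
	if ((val & read_mask) != ro || (val & write_mask))
		ok = 0;

	/* Ones pass: RO bits must be unchanged, RW bits all set. */
	reg_write(read_mask | write_mask);
	val = reg_read();
	if ((val & read_mask) != ro || (val & write_mask) != write_mask)
		ok = 0;

	reg_write(save);	/* always restore the original contents */
	return ok;
}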
11035 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11036 {
11037 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11038 int i;
11039 u32 j;
11041 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11042 for (j = 0; j < len; j += 4) {
11043 u32 val;
11045 tg3_write_mem(tp, offset + j, test_pattern[i]);
11046 tg3_read_mem(tp, offset + j, &val);
11047 if (val != test_pattern[i])
11048 return -EIO;
11049 }
11050 }
11051 return 0;
11052 }
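/* Editor's illustrative sketch, not part of the driver: the same
 * write-then-read-back walk tg3_do_mem_test() performs, but over an
 * ordinary buffer instead of NIC-internal memory accessed through
 * tg3_write_mem()/tg3_read_mem().  The patterns match the table above
 * (all-zeros, all-ones, alternating bits).
 */
#include <stddef.h>
#include <stdint.h>

static int pattern_mem_test(volatile uint32_t *mem, size_t words)
{
	static const uint32_t pat[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	size_t i, j;

	for (i = 0; i < sizeof(pat) / sizeof(pat[0]); i++) {
		for (j = 0; j < words; j++) {
			mem[j] = pat[i];	/* write the pattern ... */
			if (mem[j] != pat[i])	/* ... then verify it */
				return -1;
		}
	}
	return 0;
}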
11054 static int tg3_test_memory(struct tg3 *tp)
11055 {
11056 static struct mem_entry {
11057 u32 offset;
11058 u32 len;
11059 } mem_tbl_570x[] = {
11060 { 0x00000000, 0x00b50},
11061 { 0x00002000, 0x1c000},
11062 { 0xffffffff, 0x00000}
11063 }, mem_tbl_5705[] = {
11064 { 0x00000100, 0x0000c},
11065 { 0x00000200, 0x00008},
11066 { 0x00004000, 0x00800},
11067 { 0x00006000, 0x01000},
11068 { 0x00008000, 0x02000},
11069 { 0x00010000, 0x0e000},
11070 { 0xffffffff, 0x00000}
11071 }, mem_tbl_5755[] = {
11072 { 0x00000200, 0x00008},
11073 { 0x00004000, 0x00800},
11074 { 0x00006000, 0x00800},
11075 { 0x00008000, 0x02000},
11076 { 0x00010000, 0x0c000},
11077 { 0xffffffff, 0x00000}
11078 }, mem_tbl_5906[] = {
11079 { 0x00000200, 0x00008},
11080 { 0x00004000, 0x00400},
11081 { 0x00006000, 0x00400},
11082 { 0x00008000, 0x01000},
11083 { 0x00010000, 0x01000},
11084 { 0xffffffff, 0x00000}
11085 }, mem_tbl_5717[] = {
11086 { 0x00000200, 0x00008},
11087 { 0x00010000, 0x0a000},
11088 { 0x00020000, 0x13c00},
11089 { 0xffffffff, 0x00000}
11090 }, mem_tbl_57765[] = {
11091 { 0x00000200, 0x00008},
11092 { 0x00004000, 0x00800},
11093 { 0x00006000, 0x09800},
11094 { 0x00010000, 0x0a000},
11095 { 0xffffffff, 0x00000}
11096 };
11097 struct mem_entry *mem_tbl;
11098 int err = 0;
11099 int i;
11101 if (tg3_flag(tp, 5717_PLUS))
11102 mem_tbl = mem_tbl_5717;
11103 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11104 mem_tbl = mem_tbl_57765;
11105 else if (tg3_flag(tp, 5755_PLUS))
11106 mem_tbl = mem_tbl_5755;
11107 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11108 mem_tbl = mem_tbl_5906;
11109 else if (tg3_flag(tp, 5705_PLUS))
11110 mem_tbl = mem_tbl_5705;
11111 else
11112 mem_tbl = mem_tbl_570x;
11114 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11115 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11116 if (err)
11117 break;
11118 }
11120 return err;
11121 }
11123 #define TG3_MAC_LOOPBACK 0
11124 #define TG3_PHY_LOOPBACK 1
11125 #define TG3_TSO_LOOPBACK 2
11127 #define TG3_TSO_MSS 500
11129 #define TG3_TSO_IP_HDR_LEN 20
11130 #define TG3_TSO_TCP_HDR_LEN 20
11131 #define TG3_TSO_TCP_OPT_LEN 12
11133 static const u8 tg3_tso_header[] = {
11134 0x08, 0x00,
11135 0x45, 0x00, 0x00, 0x00,
11136 0x00, 0x00, 0x40, 0x00,
11137 0x40, 0x06, 0x00, 0x00,
11138 0x0a, 0x00, 0x00, 0x01,
11139 0x0a, 0x00, 0x00, 0x02,
11140 0x0d, 0x00, 0xe0, 0x00,
11141 0x00, 0x00, 0x01, 0x00,
11142 0x00, 0x00, 0x02, 0x00,
11143 0x80, 0x10, 0x10, 0x00,
11144 0x14, 0x09, 0x00, 0x00,
11145 0x01, 0x01, 0x08, 0x0a,
11146 0x11, 0x11, 0x11, 0x11,
11147 0x11, 0x11, 0x11, 0x11,
11148 };
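/* Editor's illustrative sketch, not part of the driver: how the TSO
 * loopback test sizes its single super-packet.  The canned header above
 * is 54 bytes (2-byte ethertype + 20 IP + 20 TCP + 12 TCP options), the
 * payload starts after the two 6-byte MAC addresses plus that header,
 * and it is carved into TG3_TSO_MSS-byte segments, so the expected rx
 * packet count is a round-up division, matching the DIV_ROUND_UP() call
 * in tg3_run_loopback() below.
 */
#include <stdint.h>

static unsigned int tso_expected_pkts(unsigned int tx_len)
{
	/* Payload excludes the two MACs and the 54-byte canned header. */
	unsigned int payload = tx_len - 2 * 6 - 54;

	return (payload + 500 - 1) / 500;	/* TG3_TSO_MSS == 500 */
}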
11150 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11151 {
11152 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11153 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11154 struct sk_buff *skb, *rx_skb;
11155 u8 *tx_data;
11156 dma_addr_t map;
11157 int num_pkts, tx_len, rx_len, i, err;
11158 struct tg3_rx_buffer_desc *desc;
11159 struct tg3_napi *tnapi, *rnapi;
11160 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11162 tnapi = &tp->napi[0];
11163 rnapi = &tp->napi[0];
11164 if (tp->irq_cnt > 1) {
11165 if (tg3_flag(tp, ENABLE_RSS))
11166 rnapi = &tp->napi[1];
11167 if (tg3_flag(tp, ENABLE_TSS))
11168 tnapi = &tp->napi[1];
11169 }
11170 coal_now = tnapi->coal_now | rnapi->coal_now;
11172 if (loopback_mode == TG3_MAC_LOOPBACK) {
11173 /* HW errata - mac loopback fails in some cases on 5780.
11174 * Normal traffic and PHY loopback are not affected by
11175 * errata. Also, the MAC loopback test is deprecated for
11176 * all newer ASIC revisions.
11177 */
11178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11179 tg3_flag(tp, CPMU_PRESENT))
11180 return 0;
11182 mac_mode = tp->mac_mode &
11183 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11184 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11185 if (!tg3_flag(tp, 5705_PLUS))
11186 mac_mode |= MAC_MODE_LINK_POLARITY;
11187 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11188 mac_mode |= MAC_MODE_PORT_MODE_MII;
11189 else
11190 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11191 tw32(MAC_MODE, mac_mode);
11192 } else {
11193 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11194 tg3_phy_fet_toggle_apd(tp, false);
11195 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11196 } else
11197 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11199 tg3_phy_toggle_automdix(tp, 0);
11201 tg3_writephy(tp, MII_BMCR, val);
11202 udelay(40);
11204 mac_mode = tp->mac_mode &
11205 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11206 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11207 tg3_writephy(tp, MII_TG3_FET_PTEST,
11208 MII_TG3_FET_PTEST_FRC_TX_LINK |
11209 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11210 /* The write needs to be flushed for the AC131 */
11211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11212 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11213 mac_mode |= MAC_MODE_PORT_MODE_MII;
11214 } else
11215 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11217 /* reset to prevent losing 1st rx packet intermittently */
11218 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11219 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11220 udelay(10);
11221 tw32_f(MAC_RX_MODE, tp->rx_mode);
11222 }
11223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11224 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11225 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11226 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11227 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11228 mac_mode |= MAC_MODE_LINK_POLARITY;
11229 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11230 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11231 }
11232 tw32(MAC_MODE, mac_mode);
11234 /* Wait for link */
11235 for (i = 0; i < 100; i++) {
11236 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11237 break;
11238 mdelay(1);
11239 }
11240 }
11242 err = -EIO;
11244 tx_len = pktsz;
11245 skb = netdev_alloc_skb(tp->dev, tx_len);
11246 if (!skb)
11247 return -ENOMEM;
11249 tx_data = skb_put(skb, tx_len);
11250 memcpy(tx_data, tp->dev->dev_addr, 6);
11251 memset(tx_data + 6, 0x0, 8);
11253 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11255 if (loopback_mode == TG3_TSO_LOOPBACK) {
11256 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11258 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11259 TG3_TSO_TCP_OPT_LEN;
11261 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11262 sizeof(tg3_tso_header));
11263 mss = TG3_TSO_MSS;
11265 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11266 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11268 /* Set the total length field in the IP header */
11269 iph->tot_len = htons((u16)(mss + hdr_len));
11271 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11272 TXD_FLAG_CPU_POST_DMA);
11274 if (tg3_flag(tp, HW_TSO_1) ||
11275 tg3_flag(tp, HW_TSO_2) ||
11276 tg3_flag(tp, HW_TSO_3)) {
11277 struct tcphdr *th;
11278 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11279 th = (struct tcphdr *)&tx_data[val];
11280 th->check = 0;
11281 } else
11282 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11284 if (tg3_flag(tp, HW_TSO_3)) {
11285 mss |= (hdr_len & 0xc) << 12;
11286 if (hdr_len & 0x10)
11287 base_flags |= 0x00000010;
11288 base_flags |= (hdr_len & 0x3e0) << 5;
11289 } else if (tg3_flag(tp, HW_TSO_2))
11290 mss |= hdr_len << 9;
11291 else if (tg3_flag(tp, HW_TSO_1) ||
11292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11293 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11294 } else {
11295 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11296 }
11298 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11299 } else {
11300 num_pkts = 1;
11301 data_off = ETH_HLEN;
11302 }
11304 for (i = data_off; i < tx_len; i++)
11305 tx_data[i] = (u8) (i & 0xff);
11307 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11308 if (pci_dma_mapping_error(tp->pdev, map)) {
11309 dev_kfree_skb(skb);
11310 return -EIO;
11311 }
11313 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11314 rnapi->coal_now);
11316 udelay(10);
11318 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11320 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11321 base_flags, (mss << 1) | 1);
11323 tnapi->tx_prod++;
11325 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11326 tr32_mailbox(tnapi->prodmbox);
11328 udelay(10);
11330 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11331 for (i = 0; i < 35; i++) {
11332 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11333 coal_now);
11335 udelay(10);
11337 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11338 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11339 if ((tx_idx == tnapi->tx_prod) &&
11340 (rx_idx == (rx_start_idx + num_pkts)))
11341 break;
11342 }
11344 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11345 dev_kfree_skb(skb);
11347 if (tx_idx != tnapi->tx_prod)
11348 goto out;
11350 if (rx_idx != rx_start_idx + num_pkts)
11351 goto out;
11353 val = data_off;
11354 while (rx_idx != rx_start_idx) {
11355 desc = &rnapi->rx_rcb[rx_start_idx++];
11356 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11357 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11359 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11360 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11361 goto out;
11363 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11364 - ETH_FCS_LEN;
11366 if (loopback_mode != TG3_TSO_LOOPBACK) {
11367 if (rx_len != tx_len)
11368 goto out;
11370 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11371 if (opaque_key != RXD_OPAQUE_RING_STD)
11372 goto out;
11373 } else {
11374 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11375 goto out;
11376 }
11377 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11378 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11379 >> RXD_TCPCSUM_SHIFT == 0xffff) {
11380 goto out;
11381 }
11383 if (opaque_key == RXD_OPAQUE_RING_STD) {
11384 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11385 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11386 mapping);
11387 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11388 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11389 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11390 mapping);
11391 } else
11392 goto out;
11394 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11395 PCI_DMA_FROMDEVICE);
11397 for (i = data_off; i < rx_len; i++, val++) {
11398 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11399 goto out;
11400 }
11401 }
11403 err = 0;
11405 /* tg3_free_rings will unmap and free the rx_skb */
11406 out:
11407 return err;
11408 }
11410 #define TG3_STD_LOOPBACK_FAILED 1
11411 #define TG3_JMB_LOOPBACK_FAILED 2
11412 #define TG3_TSO_LOOPBACK_FAILED 4
11414 #define TG3_MAC_LOOPBACK_SHIFT 0
11415 #define TG3_PHY_LOOPBACK_SHIFT 4
11416 #define TG3_LOOPBACK_FAILED 0x00000077
11418 static int tg3_test_loopback(struct tg3 *tp)
11419 {
11420 int err = 0;
11421 u32 eee_cap, cpmuctrl = 0;
11423 if (!netif_running(tp->dev))
11424 return TG3_LOOPBACK_FAILED;
11426 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11427 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11429 err = tg3_reset_hw(tp, 1);
11430 if (err) {
11431 err = TG3_LOOPBACK_FAILED;
11432 goto done;
11433 }
11435 if (tg3_flag(tp, ENABLE_RSS)) {
11436 int i;
11438 /* Reroute all rx packets to the 1st queue */
11439 for (i = MAC_RSS_INDIR_TBL_0;
11440 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11441 tw32(i, 0x0);
11442 }
11444 /* Turn off gphy autopowerdown. */
11445 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11446 tg3_phy_toggle_apd(tp, false);
11448 if (tg3_flag(tp, CPMU_PRESENT)) {
11449 int i;
11450 u32 status;
11452 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11454 /* Wait for up to 40 microseconds to acquire lock. */
11455 for (i = 0; i < 4; i++) {
11456 status = tr32(TG3_CPMU_MUTEX_GNT);
11457 if (status == CPMU_MUTEX_GNT_DRIVER)
11458 break;
11459 udelay(10);
11460 }
11462 if (status != CPMU_MUTEX_GNT_DRIVER) {
11463 err = TG3_LOOPBACK_FAILED;
11464 goto done;
11465 }
11467 /* Turn off link-based power management. */
11468 cpmuctrl = tr32(TG3_CPMU_CTRL);
11469 tw32(TG3_CPMU_CTRL,
11470 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11471 CPMU_CTRL_LINK_AWARE_MODE));
11472 }
11474 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11475 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11477 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11478 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11479 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11481 if (tg3_flag(tp, CPMU_PRESENT)) {
11482 tw32(TG3_CPMU_CTRL, cpmuctrl);
11484 /* Release the mutex */
11485 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11486 }
11488 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11489 !tg3_flag(tp, USE_PHYLIB)) {
11490 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11491 err |= TG3_STD_LOOPBACK_FAILED <<
11492 TG3_PHY_LOOPBACK_SHIFT;
11493 if (tg3_flag(tp, TSO_CAPABLE) &&
11494 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11495 err |= TG3_TSO_LOOPBACK_FAILED <<
11496 TG3_PHY_LOOPBACK_SHIFT;
11497 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11498 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11499 err |= TG3_JMB_LOOPBACK_FAILED <<
11500 TG3_PHY_LOOPBACK_SHIFT;
11501 }
11503 /* Re-enable gphy autopowerdown. */
11504 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11505 tg3_phy_toggle_apd(tp, true);
11507 done:
11508 tp->phy_flags |= eee_cap;
11510 return err;
11511 }
11513 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11514 u64 *data)
11515 {
11516 struct tg3 *tp = netdev_priv(dev);
11518 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11519 tg3_power_up(tp);
11521 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11523 if (tg3_test_nvram(tp) != 0) {
11524 etest->flags |= ETH_TEST_FL_FAILED;
11525 data[0] = 1;
11526 }
11527 if (tg3_test_link(tp) != 0) {
11528 etest->flags |= ETH_TEST_FL_FAILED;
11529 data[1] = 1;
11530 }
11531 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11532 int err, err2 = 0, irq_sync = 0;
11534 if (netif_running(dev)) {
11535 tg3_phy_stop(tp);
11536 tg3_netif_stop(tp);
11537 irq_sync = 1;
11538 }
11540 tg3_full_lock(tp, irq_sync);
11542 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11543 err = tg3_nvram_lock(tp);
11544 tg3_halt_cpu(tp, RX_CPU_BASE);
11545 if (!tg3_flag(tp, 5705_PLUS))
11546 tg3_halt_cpu(tp, TX_CPU_BASE);
11547 if (!err)
11548 tg3_nvram_unlock(tp);
11550 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11551 tg3_phy_reset(tp);
11553 if (tg3_test_registers(tp) != 0) {
11554 etest->flags |= ETH_TEST_FL_FAILED;
11555 data[2] = 1;
11556 }
11557 if (tg3_test_memory(tp) != 0) {
11558 etest->flags |= ETH_TEST_FL_FAILED;
11559 data[3] = 1;
11560 }
11561 if ((data[4] = tg3_test_loopback(tp)) != 0)
11562 etest->flags |= ETH_TEST_FL_FAILED;
11564 tg3_full_unlock(tp);
11566 if (tg3_test_interrupt(tp) != 0) {
11567 etest->flags |= ETH_TEST_FL_FAILED;
11568 data[5] = 1;
11569 }
11571 tg3_full_lock(tp, 0);
11573 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11574 if (netif_running(dev)) {
11575 tg3_flag_set(tp, INIT_COMPLETE);
11576 err2 = tg3_restart_hw(tp, 1);
11577 if (!err2)
11578 tg3_netif_start(tp);
11579 }
11581 tg3_full_unlock(tp);
11583 if (irq_sync && !err2)
11584 tg3_phy_start(tp);
11586 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11587 tg3_power_down(tp);
11588 }
11591 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11592 {
11593 struct mii_ioctl_data *data = if_mii(ifr);
11594 struct tg3 *tp = netdev_priv(dev);
11595 int err;
11597 if (tg3_flag(tp, USE_PHYLIB)) {
11598 struct phy_device *phydev;
11599 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11600 return -EAGAIN;
11601 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11602 return phy_mii_ioctl(phydev, ifr, cmd);
11603 }
11605 switch (cmd) {
11606 case SIOCGMIIPHY:
11607 data->phy_id = tp->phy_addr;
11609 /* fallthru */
11610 case SIOCGMIIREG: {
11611 u32 mii_regval;
11613 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11614 break; /* We have no PHY */
11616 if (!netif_running(dev))
11617 return -EAGAIN;
11619 spin_lock_bh(&tp->lock);
11620 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11621 spin_unlock_bh(&tp->lock);
11623 data->val_out = mii_regval;
11625 return err;
11626 }
11628 case SIOCSMIIREG:
11629 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11630 break; /* We have no PHY */
11632 if (!netif_running(dev))
11633 return -EAGAIN;
11635 spin_lock_bh(&tp->lock);
11636 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11637 spin_unlock_bh(&tp->lock);
11639 return err;
11641 default:
11642 /* do nothing */
11643 break;
11644 }
11645 return -EOPNOTSUPP;
11646 }
11648 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11649 {
11650 struct tg3 *tp = netdev_priv(dev);
11652 memcpy(ec, &tp->coal, sizeof(*ec));
11653 return 0;
11654 }
11656 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11657 {
11658 struct tg3 *tp = netdev_priv(dev);
11659 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11660 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11662 if (!tg3_flag(tp, 5705_PLUS)) {
11663 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11664 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11665 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11666 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11667 }
11669 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11670 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11671 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11672 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11673 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11674 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11675 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11676 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11677 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11678 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11679 return -EINVAL;
11681 /* No rx interrupts will be generated if both are zero */
11682 if ((ec->rx_coalesce_usecs == 0) &&
11683 (ec->rx_max_coalesced_frames == 0))
11684 return -EINVAL;
11686 /* No tx interrupts will be generated if both are zero */
11687 if ((ec->tx_coalesce_usecs == 0) &&
11688 (ec->tx_max_coalesced_frames == 0))
11689 return -EINVAL;
11691 /* Only copy relevant parameters, ignore all others. */
11692 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11693 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11694 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11695 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11696 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11697 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11698 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11699 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11700 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11702 if (netif_running(dev)) {
11703 tg3_full_lock(tp, 0);
11704 __tg3_set_coalesce(tp, &tp->coal);
11705 tg3_full_unlock(tp);
11706 }
11707 return 0;
11708 }
11710 static const struct ethtool_ops tg3_ethtool_ops = {
11711 .get_settings = tg3_get_settings,
11712 .set_settings = tg3_set_settings,
11713 .get_drvinfo = tg3_get_drvinfo,
11714 .get_regs_len = tg3_get_regs_len,
11715 .get_regs = tg3_get_regs,
11716 .get_wol = tg3_get_wol,
11717 .set_wol = tg3_set_wol,
11718 .get_msglevel = tg3_get_msglevel,
11719 .set_msglevel = tg3_set_msglevel,
11720 .nway_reset = tg3_nway_reset,
11721 .get_link = ethtool_op_get_link,
11722 .get_eeprom_len = tg3_get_eeprom_len,
11723 .get_eeprom = tg3_get_eeprom,
11724 .set_eeprom = tg3_set_eeprom,
11725 .get_ringparam = tg3_get_ringparam,
11726 .set_ringparam = tg3_set_ringparam,
11727 .get_pauseparam = tg3_get_pauseparam,
11728 .set_pauseparam = tg3_set_pauseparam,
11729 .self_test = tg3_self_test,
11730 .get_strings = tg3_get_strings,
11731 .set_phys_id = tg3_set_phys_id,
11732 .get_ethtool_stats = tg3_get_ethtool_stats,
11733 .get_coalesce = tg3_get_coalesce,
11734 .set_coalesce = tg3_set_coalesce,
11735 .get_sset_count = tg3_get_sset_count,
11736 };
11738 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11739 {
11740 u32 cursize, val, magic;
11742 tp->nvram_size = EEPROM_CHIP_SIZE;
11744 if (tg3_nvram_read(tp, 0, &magic) != 0)
11745 return;
11747 if ((magic != TG3_EEPROM_MAGIC) &&
11748 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11749 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11750 return;
11752 /*
11753 * Size the chip by reading offsets at increasing powers of two.
11754 * When we encounter our validation signature, we know the addressing
11755 * has wrapped around, and thus have our chip size.
11756 */
11757 cursize = 0x10;
11759 while (cursize < tp->nvram_size) {
11760 if (tg3_nvram_read(tp, cursize, &val) != 0)
11761 return;
11763 if (val == magic)
11764 break;
11766 cursize <<= 1;
11767 }
11769 tp->nvram_size = cursize;
11770 }
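/* Editor's illustrative sketch, not part of the driver: the
 * power-of-two probe above, run against a plain byte array standing in
 * for the EEPROM.  A part whose address bits wrap folds large offsets
 * back to offset 0, so the signature word reappears once the probe
 * offset exceeds the real size; the modulo below models that wrap.
 */
#include <stdint.h>
#include <string.h>

static uint32_t probe_eeprom_size(const uint8_t *img, uint32_t real_size,
				  uint32_t max_size)
{
	uint32_t magic, val, cursize = 0x10;

	memcpy(&magic, img, 4);
	while (cursize < max_size) {
		/* Address wrap-around folds the offset back onto 0. */
		memcpy(&val, img + (cursize % real_size), 4);
		if (val == magic)
			break;
		cursize <<= 1;
	}
	return cursize;
}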
11772 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11773 {
11774 u32 val;
11776 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11777 return;
11779 /* Selfboot format */
11780 if (val != TG3_EEPROM_MAGIC) {
11781 tg3_get_eeprom_size(tp);
11782 return;
11783 }
11785 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11786 if (val != 0) {
11787 /* This is confusing. We want to operate on the
11788 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11789 * call will read from NVRAM and byteswap the data
11790 * according to the byteswapping settings for all
11791 * other register accesses. This ensures the data we
11792 * want will always reside in the lower 16-bits.
11793 * However, the data in NVRAM is in LE format, which
11794 * means the data from the NVRAM read will always be
11795 * opposite the endianness of the CPU. The 16-bit
11796 * byteswap then brings the data to CPU endianness.
11797 */
11798 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11799 return;
11800 }
11801 }
11802 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11803 }
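/* Editor's illustrative sketch, not part of the driver: the 16-bit swap
 * the comment above describes.  tg3_nvram_read() byteswaps the
 * little-endian NVRAM word to CPU order, so the size-in-KB field at
 * 0xf2 lands in the low 16 bits with its two bytes reversed; one more
 * 16-bit swap (the kernel's swab16()) yields the usable value.
 */
#include <stdint.h>

static uint32_t nvram_size_bytes(uint32_t word_at_0xf0)
{
	uint16_t raw = (uint16_t)(word_at_0xf0 & 0xffff);
	uint16_t kb = (uint16_t)((raw << 8) | (raw >> 8));	/* swab16 */

	return (uint32_t)kb * 1024;
}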
11805 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11806 {
11807 u32 nvcfg1;
11809 nvcfg1 = tr32(NVRAM_CFG1);
11810 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11811 tg3_flag_set(tp, FLASH);
11812 } else {
11813 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11814 tw32(NVRAM_CFG1, nvcfg1);
11815 }
11817 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11818 tg3_flag(tp, 5780_CLASS)) {
11819 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11820 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11821 tp->nvram_jedecnum = JEDEC_ATMEL;
11822 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11823 tg3_flag_set(tp, NVRAM_BUFFERED);
11824 break;
11825 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11826 tp->nvram_jedecnum = JEDEC_ATMEL;
11827 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11828 break;
11829 case FLASH_VENDOR_ATMEL_EEPROM:
11830 tp->nvram_jedecnum = JEDEC_ATMEL;
11831 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11832 tg3_flag_set(tp, NVRAM_BUFFERED);
11833 break;
11834 case FLASH_VENDOR_ST:
11835 tp->nvram_jedecnum = JEDEC_ST;
11836 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11837 tg3_flag_set(tp, NVRAM_BUFFERED);
11838 break;
11839 case FLASH_VENDOR_SAIFUN:
11840 tp->nvram_jedecnum = JEDEC_SAIFUN;
11841 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11842 break;
11843 case FLASH_VENDOR_SST_SMALL:
11844 case FLASH_VENDOR_SST_LARGE:
11845 tp->nvram_jedecnum = JEDEC_SST;
11846 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11847 break;
11848 }
11849 } else {
11850 tp->nvram_jedecnum = JEDEC_ATMEL;
11851 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11852 tg3_flag_set(tp, NVRAM_BUFFERED);
11853 }
11854 }
11856 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11857 {
11858 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11859 case FLASH_5752PAGE_SIZE_256:
11860 tp->nvram_pagesize = 256;
11861 break;
11862 case FLASH_5752PAGE_SIZE_512:
11863 tp->nvram_pagesize = 512;
11864 break;
11865 case FLASH_5752PAGE_SIZE_1K:
11866 tp->nvram_pagesize = 1024;
11867 break;
11868 case FLASH_5752PAGE_SIZE_2K:
11869 tp->nvram_pagesize = 2048;
11870 break;
11871 case FLASH_5752PAGE_SIZE_4K:
11872 tp->nvram_pagesize = 4096;
11873 break;
11874 case FLASH_5752PAGE_SIZE_264:
11875 tp->nvram_pagesize = 264;
11876 break;
11877 case FLASH_5752PAGE_SIZE_528:
11878 tp->nvram_pagesize = 528;
11879 break;
11880 }
11881 }
11883 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11884 {
11885 u32 nvcfg1;
11887 nvcfg1 = tr32(NVRAM_CFG1);
11889 /* NVRAM protection for TPM */
11890 if (nvcfg1 & (1 << 27))
11891 tg3_flag_set(tp, PROTECTED_NVRAM);
11893 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11894 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11895 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11896 tp->nvram_jedecnum = JEDEC_ATMEL;
11897 tg3_flag_set(tp, NVRAM_BUFFERED);
11898 break;
11899 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11900 tp->nvram_jedecnum = JEDEC_ATMEL;
11901 tg3_flag_set(tp, NVRAM_BUFFERED);
11902 tg3_flag_set(tp, FLASH);
11903 break;
11904 case FLASH_5752VENDOR_ST_M45PE10:
11905 case FLASH_5752VENDOR_ST_M45PE20:
11906 case FLASH_5752VENDOR_ST_M45PE40:
11907 tp->nvram_jedecnum = JEDEC_ST;
11908 tg3_flag_set(tp, NVRAM_BUFFERED);
11909 tg3_flag_set(tp, FLASH);
11910 break;
11911 }
11913 if (tg3_flag(tp, FLASH)) {
11914 tg3_nvram_get_pagesize(tp, nvcfg1);
11915 } else {
11916 /* For eeprom, set pagesize to maximum eeprom size */
11917 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11919 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11920 tw32(NVRAM_CFG1, nvcfg1);
11921 }
11922 }
11924 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11925 {
11926 u32 nvcfg1, protect = 0;
11928 nvcfg1 = tr32(NVRAM_CFG1);
11930 /* NVRAM protection for TPM */
11931 if (nvcfg1 & (1 << 27)) {
11932 tg3_flag_set(tp, PROTECTED_NVRAM);
11933 protect = 1;
11934 }
11936 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11937 switch (nvcfg1) {
11938 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11939 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11940 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11941 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11942 tp->nvram_jedecnum = JEDEC_ATMEL;
11943 tg3_flag_set(tp, NVRAM_BUFFERED);
11944 tg3_flag_set(tp, FLASH);
11945 tp->nvram_pagesize = 264;
11946 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11947 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11948 tp->nvram_size = (protect ? 0x3e200 :
11949 TG3_NVRAM_SIZE_512KB);
11950 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11951 tp->nvram_size = (protect ? 0x1f200 :
11952 TG3_NVRAM_SIZE_256KB);
11953 else
11954 tp->nvram_size = (protect ? 0x1f200 :
11955 TG3_NVRAM_SIZE_128KB);
11956 break;
11957 case FLASH_5752VENDOR_ST_M45PE10:
11958 case FLASH_5752VENDOR_ST_M45PE20:
11959 case FLASH_5752VENDOR_ST_M45PE40:
11960 tp->nvram_jedecnum = JEDEC_ST;
11961 tg3_flag_set(tp, NVRAM_BUFFERED);
11962 tg3_flag_set(tp, FLASH);
11963 tp->nvram_pagesize = 256;
11964 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11965 tp->nvram_size = (protect ?
11966 TG3_NVRAM_SIZE_64KB :
11967 TG3_NVRAM_SIZE_128KB);
11968 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11969 tp->nvram_size = (protect ?
11970 TG3_NVRAM_SIZE_64KB :
11971 TG3_NVRAM_SIZE_256KB);
11972 else
11973 tp->nvram_size = (protect ?
11974 TG3_NVRAM_SIZE_128KB :
11975 TG3_NVRAM_SIZE_512KB);
11976 break;
11977 }
11978 }
11980 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11981 {
11982 u32 nvcfg1;
11984 nvcfg1 = tr32(NVRAM_CFG1);
11986 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11987 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11988 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11989 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11990 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11991 tp->nvram_jedecnum = JEDEC_ATMEL;
11992 tg3_flag_set(tp, NVRAM_BUFFERED);
11993 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11995 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11996 tw32(NVRAM_CFG1, nvcfg1);
11997 break;
11998 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11999 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12000 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12001 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12002 tp->nvram_jedecnum = JEDEC_ATMEL;
12003 tg3_flag_set(tp, NVRAM_BUFFERED);
12004 tg3_flag_set(tp, FLASH);
12005 tp->nvram_pagesize = 264;
12006 break;
12007 case FLASH_5752VENDOR_ST_M45PE10:
12008 case FLASH_5752VENDOR_ST_M45PE20:
12009 case FLASH_5752VENDOR_ST_M45PE40:
12010 tp->nvram_jedecnum = JEDEC_ST;
12011 tg3_flag_set(tp, NVRAM_BUFFERED);
12012 tg3_flag_set(tp, FLASH);
12013 tp->nvram_pagesize = 256;
12014 break;
12015 }
12016 }
12018 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12019 {
12020 u32 nvcfg1, protect = 0;
12022 nvcfg1 = tr32(NVRAM_CFG1);
12024 /* NVRAM protection for TPM */
12025 if (nvcfg1 & (1 << 27)) {
12026 tg3_flag_set(tp, PROTECTED_NVRAM);
12027 protect = 1;
12028 }
12030 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12031 switch (nvcfg1) {
12032 case FLASH_5761VENDOR_ATMEL_ADB021D:
12033 case FLASH_5761VENDOR_ATMEL_ADB041D:
12034 case FLASH_5761VENDOR_ATMEL_ADB081D:
12035 case FLASH_5761VENDOR_ATMEL_ADB161D:
12036 case FLASH_5761VENDOR_ATMEL_MDB021D:
12037 case FLASH_5761VENDOR_ATMEL_MDB041D:
12038 case FLASH_5761VENDOR_ATMEL_MDB081D:
12039 case FLASH_5761VENDOR_ATMEL_MDB161D:
12040 tp->nvram_jedecnum = JEDEC_ATMEL;
12041 tg3_flag_set(tp, NVRAM_BUFFERED);
12042 tg3_flag_set(tp, FLASH);
12043 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12044 tp->nvram_pagesize = 256;
12045 break;
12046 case FLASH_5761VENDOR_ST_A_M45PE20:
12047 case FLASH_5761VENDOR_ST_A_M45PE40:
12048 case FLASH_5761VENDOR_ST_A_M45PE80:
12049 case FLASH_5761VENDOR_ST_A_M45PE16:
12050 case FLASH_5761VENDOR_ST_M_M45PE20:
12051 case FLASH_5761VENDOR_ST_M_M45PE40:
12052 case FLASH_5761VENDOR_ST_M_M45PE80:
12053 case FLASH_5761VENDOR_ST_M_M45PE16:
12054 tp->nvram_jedecnum = JEDEC_ST;
12055 tg3_flag_set(tp, NVRAM_BUFFERED);
12056 tg3_flag_set(tp, FLASH);
12057 tp->nvram_pagesize = 256;
12058 break;
12059 }
12061 if (protect) {
12062 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12063 } else {
12064 switch (nvcfg1) {
12065 case FLASH_5761VENDOR_ATMEL_ADB161D:
12066 case FLASH_5761VENDOR_ATMEL_MDB161D:
12067 case FLASH_5761VENDOR_ST_A_M45PE16:
12068 case FLASH_5761VENDOR_ST_M_M45PE16:
12069 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12070 break;
12071 case FLASH_5761VENDOR_ATMEL_ADB081D:
12072 case FLASH_5761VENDOR_ATMEL_MDB081D:
12073 case FLASH_5761VENDOR_ST_A_M45PE80:
12074 case FLASH_5761VENDOR_ST_M_M45PE80:
12075 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12076 break;
12077 case FLASH_5761VENDOR_ATMEL_ADB041D:
12078 case FLASH_5761VENDOR_ATMEL_MDB041D:
12079 case FLASH_5761VENDOR_ST_A_M45PE40:
12080 case FLASH_5761VENDOR_ST_M_M45PE40:
12081 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12082 break;
12083 case FLASH_5761VENDOR_ATMEL_ADB021D:
12084 case FLASH_5761VENDOR_ATMEL_MDB021D:
12085 case FLASH_5761VENDOR_ST_A_M45PE20:
12086 case FLASH_5761VENDOR_ST_M_M45PE20:
12087 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12088 break;
12089 }
12090 }
12091 }
12093 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12094 {
12095 tp->nvram_jedecnum = JEDEC_ATMEL;
12096 tg3_flag_set(tp, NVRAM_BUFFERED);
12097 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12098 }
12100 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12101 {
12102 u32 nvcfg1;
12104 nvcfg1 = tr32(NVRAM_CFG1);
12106 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12107 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12108 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12109 tp->nvram_jedecnum = JEDEC_ATMEL;
12110 tg3_flag_set(tp, NVRAM_BUFFERED);
12111 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12113 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12114 tw32(NVRAM_CFG1, nvcfg1);
12115 return;
12116 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12117 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12118 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12119 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12120 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12121 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12122 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12123 tp->nvram_jedecnum = JEDEC_ATMEL;
12124 tg3_flag_set(tp, NVRAM_BUFFERED);
12125 tg3_flag_set(tp, FLASH);
12127 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12128 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12129 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12130 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12131 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12132 break;
12133 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12134 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12135 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12136 break;
12137 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12138 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12139 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12140 break;
12141 }
12142 break;
12143 case FLASH_5752VENDOR_ST_M45PE10:
12144 case FLASH_5752VENDOR_ST_M45PE20:
12145 case FLASH_5752VENDOR_ST_M45PE40:
12146 tp->nvram_jedecnum = JEDEC_ST;
12147 tg3_flag_set(tp, NVRAM_BUFFERED);
12148 tg3_flag_set(tp, FLASH);
12150 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12151 case FLASH_5752VENDOR_ST_M45PE10:
12152 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12153 break;
12154 case FLASH_5752VENDOR_ST_M45PE20:
12155 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12156 break;
12157 case FLASH_5752VENDOR_ST_M45PE40:
12158 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12159 break;
12160 }
12161 break;
12162 default:
12163 tg3_flag_set(tp, NO_NVRAM);
12164 return;
12165 }
12167 tg3_nvram_get_pagesize(tp, nvcfg1);
12168 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12169 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12170 }
12173 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12174 {
12175 u32 nvcfg1;
12177 nvcfg1 = tr32(NVRAM_CFG1);
12179 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12180 case FLASH_5717VENDOR_ATMEL_EEPROM:
12181 case FLASH_5717VENDOR_MICRO_EEPROM:
12182 tp->nvram_jedecnum = JEDEC_ATMEL;
12183 tg3_flag_set(tp, NVRAM_BUFFERED);
12184 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12186 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12187 tw32(NVRAM_CFG1, nvcfg1);
12188 return;
12189 case FLASH_5717VENDOR_ATMEL_MDB011D:
12190 case FLASH_5717VENDOR_ATMEL_ADB011B:
12191 case FLASH_5717VENDOR_ATMEL_ADB011D:
12192 case FLASH_5717VENDOR_ATMEL_MDB021D:
12193 case FLASH_5717VENDOR_ATMEL_ADB021B:
12194 case FLASH_5717VENDOR_ATMEL_ADB021D:
12195 case FLASH_5717VENDOR_ATMEL_45USPT:
12196 tp->nvram_jedecnum = JEDEC_ATMEL;
12197 tg3_flag_set(tp, NVRAM_BUFFERED);
12198 tg3_flag_set(tp, FLASH);
12200 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12201 case FLASH_5717VENDOR_ATMEL_MDB021D:
12202 /* Detect size with tg3_nvram_get_size() */
12203 break;
12204 case FLASH_5717VENDOR_ATMEL_ADB021B:
12205 case FLASH_5717VENDOR_ATMEL_ADB021D:
12206 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12207 break;
12208 default:
12209 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12210 break;
12211 }
12212 break;
12213 case FLASH_5717VENDOR_ST_M_M25PE10:
12214 case FLASH_5717VENDOR_ST_A_M25PE10:
12215 case FLASH_5717VENDOR_ST_M_M45PE10:
12216 case FLASH_5717VENDOR_ST_A_M45PE10:
12217 case FLASH_5717VENDOR_ST_M_M25PE20:
12218 case FLASH_5717VENDOR_ST_A_M25PE20:
12219 case FLASH_5717VENDOR_ST_M_M45PE20:
12220 case FLASH_5717VENDOR_ST_A_M45PE20:
12221 case FLASH_5717VENDOR_ST_25USPT:
12222 case FLASH_5717VENDOR_ST_45USPT:
12223 tp->nvram_jedecnum = JEDEC_ST;
12224 tg3_flag_set(tp, NVRAM_BUFFERED);
12225 tg3_flag_set(tp, FLASH);
12227 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12228 case FLASH_5717VENDOR_ST_M_M25PE20:
12229 case FLASH_5717VENDOR_ST_M_M45PE20:
12230 /* Detect size with tg3_nvram_get_size() */
12231 break;
12232 case FLASH_5717VENDOR_ST_A_M25PE20:
12233 case FLASH_5717VENDOR_ST_A_M45PE20:
12234 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12235 break;
12236 default:
12237 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12238 break;
12239 }
12240 break;
12241 default:
12242 tg3_flag_set(tp, NO_NVRAM);
12243 return;
12244 }
12246 tg3_nvram_get_pagesize(tp, nvcfg1);
12247 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12248 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12249 }
12251 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12252 {
12253 u32 nvcfg1, nvmpinstrp;
12255 nvcfg1 = tr32(NVRAM_CFG1);
12256 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12258 switch (nvmpinstrp) {
12259 case FLASH_5720_EEPROM_HD:
12260 case FLASH_5720_EEPROM_LD:
12261 tp->nvram_jedecnum = JEDEC_ATMEL;
12262 tg3_flag_set(tp, NVRAM_BUFFERED);
12264 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12265 tw32(NVRAM_CFG1, nvcfg1);
12266 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12267 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12268 else
12269 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12270 return;
12271 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12272 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12273 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12274 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12275 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12276 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12277 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12278 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12279 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12280 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12281 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12282 case FLASH_5720VENDOR_ATMEL_45USPT:
12283 tp->nvram_jedecnum = JEDEC_ATMEL;
12284 tg3_flag_set(tp, NVRAM_BUFFERED);
12285 tg3_flag_set(tp, FLASH);
12287 switch (nvmpinstrp) {
12288 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12289 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12290 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12291 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12292 break;
12293 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12294 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12295 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12296 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12297 break;
12298 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12299 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12300 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12301 break;
12302 default:
12303 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12304 break;
12305 }
12306 break;
12307 case FLASH_5720VENDOR_M_ST_M25PE10:
12308 case FLASH_5720VENDOR_M_ST_M45PE10:
12309 case FLASH_5720VENDOR_A_ST_M25PE10:
12310 case FLASH_5720VENDOR_A_ST_M45PE10:
12311 case FLASH_5720VENDOR_M_ST_M25PE20:
12312 case FLASH_5720VENDOR_M_ST_M45PE20:
12313 case FLASH_5720VENDOR_A_ST_M25PE20:
12314 case FLASH_5720VENDOR_A_ST_M45PE20:
12315 case FLASH_5720VENDOR_M_ST_M25PE40:
12316 case FLASH_5720VENDOR_M_ST_M45PE40:
12317 case FLASH_5720VENDOR_A_ST_M25PE40:
12318 case FLASH_5720VENDOR_A_ST_M45PE40:
12319 case FLASH_5720VENDOR_M_ST_M25PE80:
12320 case FLASH_5720VENDOR_M_ST_M45PE80:
12321 case FLASH_5720VENDOR_A_ST_M25PE80:
12322 case FLASH_5720VENDOR_A_ST_M45PE80:
12323 case FLASH_5720VENDOR_ST_25USPT:
12324 case FLASH_5720VENDOR_ST_45USPT:
12325 tp->nvram_jedecnum = JEDEC_ST;
12326 tg3_flag_set(tp, NVRAM_BUFFERED);
12327 tg3_flag_set(tp, FLASH);
12329 switch (nvmpinstrp) {
12330 case FLASH_5720VENDOR_M_ST_M25PE20:
12331 case FLASH_5720VENDOR_M_ST_M45PE20:
12332 case FLASH_5720VENDOR_A_ST_M25PE20:
12333 case FLASH_5720VENDOR_A_ST_M45PE20:
12334 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12335 break;
12336 case FLASH_5720VENDOR_M_ST_M25PE40:
12337 case FLASH_5720VENDOR_M_ST_M45PE40:
12338 case FLASH_5720VENDOR_A_ST_M25PE40:
12339 case FLASH_5720VENDOR_A_ST_M45PE40:
12340 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12341 break;
12342 case FLASH_5720VENDOR_M_ST_M25PE80:
12343 case FLASH_5720VENDOR_M_ST_M45PE80:
12344 case FLASH_5720VENDOR_A_ST_M25PE80:
12345 case FLASH_5720VENDOR_A_ST_M45PE80:
12346 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12347 break;
12348 default:
12349 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12350 break;
12351 }
12352 break;
12353 default:
12354 tg3_flag_set(tp, NO_NVRAM);
12355 return;
12356 }
12358 tg3_nvram_get_pagesize(tp, nvcfg1);
12359 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12360 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12361 }
12363 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12364 static void __devinit tg3_nvram_init(struct tg3 *tp)
12365 {
12366 tw32_f(GRC_EEPROM_ADDR,
12367 (EEPROM_ADDR_FSM_RESET |
12368 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12369 EEPROM_ADDR_CLKPERD_SHIFT)));
12371 msleep(1);
12373 /* Enable seeprom accesses. */
12374 tw32_f(GRC_LOCAL_CTRL,
12375 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12376 udelay(100);
12378 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12379 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12380 tg3_flag_set(tp, NVRAM);
12382 if (tg3_nvram_lock(tp)) {
12383 netdev_warn(tp->dev,
12384 "Cannot get nvram lock, %s failed\n",
12385 __func__);
12386 return;
12387 }
12388 tg3_enable_nvram_access(tp);
12390 tp->nvram_size = 0;
12392 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12393 tg3_get_5752_nvram_info(tp);
12394 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12395 tg3_get_5755_nvram_info(tp);
12396 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12397 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12398 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12399 tg3_get_5787_nvram_info(tp);
12400 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12401 tg3_get_5761_nvram_info(tp);
12402 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12403 tg3_get_5906_nvram_info(tp);
12404 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12405 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12406 tg3_get_57780_nvram_info(tp);
12407 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12409 tg3_get_5717_nvram_info(tp);
12410 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12411 tg3_get_5720_nvram_info(tp);
12412 else
12413 tg3_get_nvram_info(tp);
12415 if (tp->nvram_size == 0)
12416 tg3_get_nvram_size(tp);
12418 tg3_disable_nvram_access(tp);
12419 tg3_nvram_unlock(tp);
12421 } else {
12422 tg3_flag_clear(tp, NVRAM);
12423 tg3_flag_clear(tp, NVRAM_BUFFERED);
12425 tg3_get_eeprom_size(tp);
12426 }
12427 }
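/*
 * Summary of the flow above: the EEPROM state machine is reset and
 * auto-SEEPROM access enabled unconditionally; on everything newer
 * than the 5700/5701 the per-ASIC helper then decodes NVRAM_CFG1
 * under the NVRAM lock, and tg3_get_nvram_size() probes for a size
 * if the helper could not derive one.  The 5700/5701 lack the NVRAM
 * engine entirely and fall back to a plain EEPROM size probe.
 */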
12429 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12430 u32 offset, u32 len, u8 *buf)
12431 {
12432 int i, j, rc = 0;
12433 u32 val;
12435 for (i = 0; i < len; i += 4) {
12436 u32 addr;
12437 __be32 data;
12439 addr = offset + i;
12441 memcpy(&data, buf + i, 4);
12443 /*
12444 * The SEEPROM interface expects the data to always be opposite
12445 * the native endian format. We accomplish this by reversing
12446 * all the operations that would have been performed on the
12447 * data from a call to tg3_nvram_read_be32().
12448 */
12449 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12451 val = tr32(GRC_EEPROM_ADDR);
12452 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12454 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12455 EEPROM_ADDR_READ);
12456 tw32(GRC_EEPROM_ADDR, val |
12457 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12458 (addr & EEPROM_ADDR_ADDR_MASK) |
12459 EEPROM_ADDR_START |
12460 EEPROM_ADDR_WRITE);
12462 for (j = 0; j < 1000; j++) {
12463 val = tr32(GRC_EEPROM_ADDR);
12465 if (val & EEPROM_ADDR_COMPLETE)
12466 break;
12467 msleep(1);
12468 }
12469 if (!(val & EEPROM_ADDR_COMPLETE)) {
12470 rc = -EBUSY;
12471 break;
12472 }
12473 }
12475 return rc;
12476 }
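/*
 * Byte-order note for the write loop above: for any 32-bit pattern x,
 * swab32(be32_to_cpu(x)) == le32_to_cpu(x), so the dword is handed to
 * GRC_EEPROM_DATA in exactly the reverse of the byte order used on
 * the read side, undoing what tg3_nvram_read_be32() would have done.
 */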
12478 /* offset and length are dword aligned */
12479 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12480 u8 *buf)
12481 {
12482 int ret = 0;
12483 u32 pagesize = tp->nvram_pagesize;
12484 u32 pagemask = pagesize - 1;
12485 u32 nvram_cmd;
12486 u8 *tmp;
12488 tmp = kmalloc(pagesize, GFP_KERNEL);
12489 if (tmp == NULL)
12490 return -ENOMEM;
12492 while (len) {
12493 int j;
12494 u32 phy_addr, page_off, size;
12496 phy_addr = offset & ~pagemask;
12498 for (j = 0; j < pagesize; j += 4) {
12499 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12500 (__be32 *) (tmp + j));
12501 if (ret)
12502 break;
12503 }
12504 if (ret)
12505 break;
12507 page_off = offset & pagemask;
12508 size = pagesize;
12509 if (len < size)
12510 size = len;
12512 len -= size;
12514 memcpy(tmp + page_off, buf, size);
12516 offset = offset + (pagesize - page_off);
12518 tg3_enable_nvram_access(tp);
12520 /*
12521 * Before we can erase the flash page, we need
12522 * to issue a special "write enable" command.
12523 */
12524 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12526 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12527 break;
12529 /* Erase the target page */
12530 tw32(NVRAM_ADDR, phy_addr);
12532 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12533 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12535 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12536 break;
12538 /* Issue another write enable to start the write. */
12539 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12541 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12542 break;
12544 for (j = 0; j < pagesize; j += 4) {
12545 __be32 data;
12547 data = *((__be32 *) (tmp + j));
12549 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12551 tw32(NVRAM_ADDR, phy_addr + j);
12553 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12554 NVRAM_CMD_WR;
12556 if (j == 0)
12557 nvram_cmd |= NVRAM_CMD_FIRST;
12558 else if (j == (pagesize - 4))
12559 nvram_cmd |= NVRAM_CMD_LAST;
12561 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12562 break;
12563 }
12564 if (ret)
12565 break;
12566 }
12568 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12569 tg3_nvram_exec_cmd(tp, nvram_cmd);
12571 kfree(tmp);
12573 return ret;
12574 }
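/*
 * The unbuffered path above is a read-modify-write at flash page
 * granularity: the target page is read back into a bounce buffer,
 * the caller's data is merged in at the page offset, and the page is
 * erased and reprogrammed word by word, with write-enable (WREN)
 * commands issued before both the erase and the program burst and a
 * write-disable (WRDI) issued at the end.
 */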
12576 /* offset and length are dword aligned */
12577 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12578 u8 *buf)
12579 {
12580 int i, ret = 0;
12582 for (i = 0; i < len; i += 4, offset += 4) {
12583 u32 page_off, phy_addr, nvram_cmd;
12584 __be32 data;
12586 memcpy(&data, buf + i, 4);
12587 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12589 page_off = offset % tp->nvram_pagesize;
12591 phy_addr = tg3_nvram_phys_addr(tp, offset);
12593 tw32(NVRAM_ADDR, phy_addr);
12595 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12597 if (page_off == 0 || i == 0)
12598 nvram_cmd |= NVRAM_CMD_FIRST;
12599 if (page_off == (tp->nvram_pagesize - 4))
12600 nvram_cmd |= NVRAM_CMD_LAST;
12602 if (i == (len - 4))
12603 nvram_cmd |= NVRAM_CMD_LAST;
12605 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12606 !tg3_flag(tp, 5755_PLUS) &&
12607 (tp->nvram_jedecnum == JEDEC_ST) &&
12608 (nvram_cmd & NVRAM_CMD_FIRST)) {
12610 if ((ret = tg3_nvram_exec_cmd(tp,
12611 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12612 NVRAM_CMD_DONE)))
12614 break;
12615 }
12616 if (!tg3_flag(tp, FLASH)) {
12617 /* We always do complete word writes to eeprom. */
12618 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12619 }
12621 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12622 break;
12623 }
12624 return ret;
12625 }
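/*
 * In the buffered path above the flash controller handles paging, so
 * no explicit erase is issued; NVRAM_CMD_FIRST/LAST just bracket page
 * and transfer boundaries.  The extra WREN issued for ST parts on
 * pre-5755, non-5752 chips mirrors the write-enable the unbuffered
 * path sends before programming.
 */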
12627 /* offset and length are dword aligned */
12628 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12629 {
12630 int ret;
12632 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12633 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12634 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12635 udelay(40);
12636 }
12638 if (!tg3_flag(tp, NVRAM)) {
12639 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12640 } else {
12641 u32 grc_mode;
12643 ret = tg3_nvram_lock(tp);
12644 if (ret)
12645 return ret;
12647 tg3_enable_nvram_access(tp);
12648 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12649 tw32(NVRAM_WRITE1, 0x406);
12651 grc_mode = tr32(GRC_MODE);
12652 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12654 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12655 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12656 buf);
12657 } else {
12658 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12659 buf);
12660 }
12662 grc_mode = tr32(GRC_MODE);
12663 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12665 tg3_disable_nvram_access(tp);
12666 tg3_nvram_unlock(tp);
12667 }
12669 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12670 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12671 udelay(40);
12672 }
12674 return ret;
12675 }
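/*
 * Dispatch summary: parts without the NVRAM engine are written
 * through the GRC_EEPROM_* interface; everything else takes the
 * NVRAM lock, enables writes via GRC_MODE_NVRAM_WR_ENABLE and picks
 * the buffered or unbuffered writer based on the flash type.  When
 * GPIO1 doubles as the eeprom write-protect line (EEPROM_WRITE_PROT),
 * it is dropped for the duration of the write and restored after.
 */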
12677 struct subsys_tbl_ent {
12678 u16 subsys_vendor, subsys_devid;
12679 u32 phy_id;
12680 };
12682 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12683 /* Broadcom boards. */
12684 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12685 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12686 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12687 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12688 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12689 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12690 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12691 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12692 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12693 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12694 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12695 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12696 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12697 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12698 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12699 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12700 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12701 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12702 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12703 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12704 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12705 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12707 /* 3com boards. */
12708 { TG3PCI_SUBVENDOR_ID_3COM,
12709 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12710 { TG3PCI_SUBVENDOR_ID_3COM,
12711 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12712 { TG3PCI_SUBVENDOR_ID_3COM,
12713 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12714 { TG3PCI_SUBVENDOR_ID_3COM,
12715 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12716 { TG3PCI_SUBVENDOR_ID_3COM,
12717 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12719 /* DELL boards. */
12720 { TG3PCI_SUBVENDOR_ID_DELL,
12721 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12722 { TG3PCI_SUBVENDOR_ID_DELL,
12723 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12724 { TG3PCI_SUBVENDOR_ID_DELL,
12725 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12726 { TG3PCI_SUBVENDOR_ID_DELL,
12727 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12729 /* Compaq boards. */
12730 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12731 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12732 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12733 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12734 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12735 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12736 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12737 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12738 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12739 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12741 /* IBM boards. */
12742 { TG3PCI_SUBVENDOR_ID_IBM,
12743 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12744 };
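/*
 * This table is the last-resort mapping from PCI subsystem IDs to PHY
 * IDs for early boards whose eeprom carries no usable signature.  A
 * phy_id of 0 marks fiber boards with no MII PHY to identify; the
 * probe code treats that as a serdes part.
 */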
12746 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12747 {
12748 int i;
12750 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12751 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12752 tp->pdev->subsystem_vendor) &&
12753 (subsys_id_to_phy_id[i].subsys_devid ==
12754 tp->pdev->subsystem_device))
12755 return &subsys_id_to_phy_id[i];
12756 }
12757 return NULL;
12758 }
12760 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12761 {
12762 u32 val;
12763 u16 pmcsr;
12765 /* On some early chips the SRAM cannot be accessed in D3hot state,
12766 * so we need to make sure we're in D0.
12767 */
12768 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12769 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12770 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12771 msleep(1);
12773 /* Make sure register accesses (indirect or otherwise)
12774 * will function correctly.
12775 */
12776 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12777 tp->misc_host_ctrl);
12779 /* The memory arbiter has to be enabled in order for SRAM accesses
12780 * to succeed. Normally on powerup the tg3 chip firmware will make
12781 * sure it is enabled, but other entities such as system netboot
12782 * code might disable it.
12783 */
12784 val = tr32(MEMARB_MODE);
12785 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12787 tp->phy_id = TG3_PHY_ID_INVALID;
12788 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12790 /* Assume an onboard device and WOL capable by default. */
12791 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12792 tg3_flag_set(tp, WOL_CAP);
12794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12795 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12796 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12797 tg3_flag_set(tp, IS_NIC);
12798 }
12799 val = tr32(VCPU_CFGSHDW);
12800 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12801 tg3_flag_set(tp, ASPM_WORKAROUND);
12802 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12803 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12804 tg3_flag_set(tp, WOL_ENABLE);
12805 device_set_wakeup_enable(&tp->pdev->dev, true);
12806 }
12807 goto done;
12808 }
12810 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12811 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12812 u32 nic_cfg, led_cfg;
12813 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12814 int eeprom_phy_serdes = 0;
12816 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12817 tp->nic_sram_data_cfg = nic_cfg;
12819 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12820 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12821 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12822 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12823 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12824 (ver > 0) && (ver < 0x100))
12825 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12827 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12828 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12830 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12831 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12832 eeprom_phy_serdes = 1;
12834 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12835 if (nic_phy_id != 0) {
12836 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12837 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12839 eeprom_phy_id = (id1 >> 16) << 10;
12840 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12841 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12842 } else
12843 eeprom_phy_id = 0;
12845 tp->phy_id = eeprom_phy_id;
12846 if (eeprom_phy_serdes) {
12847 if (!tg3_flag(tp, 5705_PLUS))
12848 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12849 else
12850 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12851 }
12853 if (tg3_flag(tp, 5750_PLUS))
12854 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12855 SHASTA_EXT_LED_MODE_MASK);
12856 else
12857 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12859 switch (led_cfg) {
12860 default:
12861 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12862 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12863 break;
12865 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12866 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12867 break;
12869 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12870 tp->led_ctrl = LED_CTRL_MODE_MAC;
12872 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12873 * read on some older 5700/5701 bootcode.
12874 */
12875 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12876 ASIC_REV_5700 ||
12877 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12878 ASIC_REV_5701)
12879 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12881 break;
12883 case SHASTA_EXT_LED_SHARED:
12884 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12885 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12886 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12887 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12888 LED_CTRL_MODE_PHY_2);
12889 break;
12891 case SHASTA_EXT_LED_MAC:
12892 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12893 break;
12895 case SHASTA_EXT_LED_COMBO:
12896 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12897 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12898 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12899 LED_CTRL_MODE_PHY_2);
12900 break;
12901 }
12904 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12905 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12906 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12907 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12909 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12910 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12912 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12913 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12914 if ((tp->pdev->subsystem_vendor ==
12915 PCI_VENDOR_ID_ARIMA) &&
12916 (tp->pdev->subsystem_device == 0x205a ||
12917 tp->pdev->subsystem_device == 0x2063))
12918 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12919 } else {
12920 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12921 tg3_flag_set(tp, IS_NIC);
12922 }
12924 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12925 tg3_flag_set(tp, ENABLE_ASF);
12926 if (tg3_flag(tp, 5750_PLUS))
12927 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12928 }
12930 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12931 tg3_flag(tp, 5750_PLUS))
12932 tg3_flag_set(tp, ENABLE_APE);
12934 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12935 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12936 tg3_flag_clear(tp, WOL_CAP);
12938 if (tg3_flag(tp, WOL_CAP) &&
12939 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12940 tg3_flag_set(tp, WOL_ENABLE);
12941 device_set_wakeup_enable(&tp->pdev->dev, true);
12942 }
12944 if (cfg2 & (1 << 17))
12945 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12947 /* serdes signal pre-emphasis in register 0x590 set by */
12948 /* bootcode if bit 18 is set */
12949 if (cfg2 & (1 << 18))
12950 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12952 if ((tg3_flag(tp, 57765_PLUS) ||
12953 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12954 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12955 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12956 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12958 if (tg3_flag(tp, PCI_EXPRESS) &&
12959 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12960 !tg3_flag(tp, 57765_PLUS)) {
12961 u32 cfg3;
12963 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12964 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12965 tg3_flag_set(tp, ASPM_WORKAROUND);
12966 }
12968 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12969 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12970 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12971 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12972 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12973 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12974 }
12975 done:
12976 if (tg3_flag(tp, WOL_CAP))
12977 device_set_wakeup_enable(&tp->pdev->dev,
12978 tg3_flag(tp, WOL_ENABLE));
12979 else
12980 device_set_wakeup_capable(&tp->pdev->dev, false);
12981 }
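/*
 * Everything above reads the bootcode's shadow configuration out of
 * NIC SRAM (or the VCPU_CFGSHDW register on the 5906) rather than the
 * eeprom itself: PHY type and ID, LED mode, NIC-vs-LOM and
 * write-protect status, ASF/APE enables and WoL capability all come
 * from the NIC_SRAM_DATA_* words, with conservative defaults assumed
 * when the signature is missing.
 */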
12983 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12984 {
12985 int i;
12986 u32 val;
12988 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12989 tw32(OTP_CTRL, cmd);
12991 /* Wait for up to 1 ms for command to execute. */
12992 for (i = 0; i < 100; i++) {
12993 val = tr32(OTP_STATUS);
12994 if (val & OTP_STATUS_CMD_DONE)
12995 break;
12996 udelay(10);
12997 }
12999 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13000 }
13002 /* Read the gphy configuration from the OTP region of the chip. The gphy
13003 * configuration is a 32-bit value that straddles the alignment boundary.
13004 * We do two 32-bit reads and then shift and merge the results.
13005 */
13006 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13007 {
13008 u32 bhalf_otp, thalf_otp;
13010 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13012 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13013 return 0;
13015 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13017 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13018 return 0;
13020 thalf_otp = tr32(OTP_READ_DATA);
13022 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13024 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13025 return 0;
13027 bhalf_otp = tr32(OTP_READ_DATA);
13029 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13030 }
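/*
 * Merge detail for the OTP read above: the two reads fetch the words
 * on either side of the alignment boundary, and the result keeps the
 * low 16 bits of the first word as its high half and the high 16 bits
 * of the second word as its low half.  A return of 0 signals failure.
 */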
13032 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13033 {
13034 u32 adv = ADVERTISED_Autoneg |
13035 ADVERTISED_Pause;
13037 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13038 adv |= ADVERTISED_1000baseT_Half |
13039 ADVERTISED_1000baseT_Full;
13041 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13042 adv |= ADVERTISED_100baseT_Half |
13043 ADVERTISED_100baseT_Full |
13044 ADVERTISED_10baseT_Half |
13045 ADVERTISED_10baseT_Full |
13046 ADVERTISED_TP;
13047 else
13048 adv |= ADVERTISED_FIBRE;
13050 tp->link_config.advertising = adv;
13051 tp->link_config.speed = SPEED_INVALID;
13052 tp->link_config.duplex = DUPLEX_INVALID;
13053 tp->link_config.autoneg = AUTONEG_ENABLE;
13054 tp->link_config.active_speed = SPEED_INVALID;
13055 tp->link_config.active_duplex = DUPLEX_INVALID;
13056 tp->link_config.orig_speed = SPEED_INVALID;
13057 tp->link_config.orig_duplex = DUPLEX_INVALID;
13058 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13059 }
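/*
 * Default link policy set above: autonegotiate everything the PHY can
 * do, with pause advertised.  Serdes parts advertise FIBRE instead of
 * the 10/100 TP modes, 10/100-only PHYs drop the gigabit modes, and
 * the active_*/orig_* fields start out invalid.
 */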
13061 static int __devinit tg3_phy_probe(struct tg3 *tp)
13062 {
13063 u32 hw_phy_id_1, hw_phy_id_2;
13064 u32 hw_phy_id, hw_phy_id_masked;
13065 int err;
13067 /* flow control autonegotiation is default behavior */
13068 tg3_flag_set(tp, PAUSE_AUTONEG);
13069 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13071 if (tg3_flag(tp, USE_PHYLIB))
13072 return tg3_phy_init(tp);
13074 /* Reading the PHY ID register can conflict with ASF
13075 * firmware access to the PHY hardware.
13076 */
13077 err = 0;
13078 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13079 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13080 } else {
13081 /* Now read the physical PHY_ID from the chip and verify
13082 * that it is sane. If it doesn't look good, we fall back
13083 * to the PHY ID recorded in the eeprom area or, failing
13084 * that, to the hard-coded subsys-device table.
13085 */
13086 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13087 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13089 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13090 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13091 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13093 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13094 }
13096 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13097 tp->phy_id = hw_phy_id;
13098 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13099 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13100 else
13101 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13102 } else {
13103 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13104 /* Do nothing, phy ID already set up in
13105 * tg3_get_eeprom_hw_cfg().
13106 */
13107 } else {
13108 struct subsys_tbl_ent *p;
13110 /* No eeprom signature? Try the hardcoded
13111 * subsys device table.
13112 */
13113 p = tg3_lookup_by_subsys(tp);
13114 if (!p)
13115 return -ENODEV;
13117 tp->phy_id = p->phy_id;
13118 if (!tp->phy_id ||
13119 tp->phy_id == TG3_PHY_ID_BCM8002)
13120 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13121 }
13122 }
13124 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13125 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13126 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13127 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13128 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13129 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13131 tg3_phy_init_link_config(tp);
13133 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13134 !tg3_flag(tp, ENABLE_APE) &&
13135 !tg3_flag(tp, ENABLE_ASF)) {
13136 u32 bmsr, adv_reg, tg3_ctrl, mask;
13138 tg3_readphy(tp, MII_BMSR, &bmsr);
13139 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13140 (bmsr & BMSR_LSTATUS))
13141 goto skip_phy_reset;
13143 err = tg3_phy_reset(tp);
13144 if (err)
13145 return err;
13147 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
13148 ADVERTISE_100HALF | ADVERTISE_100FULL |
13149 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
13150 tg3_ctrl = 0;
13151 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
13152 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
13153 MII_TG3_CTRL_ADV_1000_FULL);
13154 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13155 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
13156 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
13157 MII_TG3_CTRL_ENABLE_AS_MASTER);
13158 }
13160 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13161 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13162 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13163 if (!tg3_copper_is_advertising_all(tp, mask)) {
13164 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13166 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13167 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
13169 tg3_writephy(tp, MII_BMCR,
13170 BMCR_ANENABLE | BMCR_ANRESTART);
13171 }
13172 tg3_phy_set_wirespeed(tp);
13174 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13175 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13176 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
13177 }
13179 skip_phy_reset:
13180 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13181 err = tg3_init_5401phy_dsp(tp);
13182 if (err)
13183 return err;
13185 err = tg3_init_5401phy_dsp(tp);
13186 }
13188 return err;
13189 }
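/*
 * Probe order above: if ASF/APE firmware owns the PHY the MII ID read
 * is skipped entirely; otherwise the ID comes from MII_PHYSID1/2,
 * falling back to the eeprom-provided ID and then to the subsystem-ID
 * table.  The back-to-back MII_BMSR reads before the reset are
 * deliberate: link status is latched, so the first read flushes a
 * stale latched-down indication.
 */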
13191 static void __devinit tg3_read_vpd(struct tg3 *tp)
13192 {
13193 u8 *vpd_data;
13194 unsigned int block_end, rosize, len;
13195 int j, i = 0;
13197 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13198 if (!vpd_data)
13199 goto out_no_vpd;
13201 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13202 PCI_VPD_LRDT_RO_DATA);
13203 if (i < 0)
13204 goto out_not_found;
13206 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13207 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13208 i += PCI_VPD_LRDT_TAG_SIZE;
13210 if (block_end > TG3_NVM_VPD_LEN)
13211 goto out_not_found;
13213 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13214 PCI_VPD_RO_KEYWORD_MFR_ID);
13215 if (j > 0) {
13216 len = pci_vpd_info_field_size(&vpd_data[j]);
13218 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13219 if (j + len > block_end || len != 4 ||
13220 memcmp(&vpd_data[j], "1028", 4))
13221 goto partno;
13223 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13224 PCI_VPD_RO_KEYWORD_VENDOR0);
13225 if (j < 0)
13226 goto partno;
13228 len = pci_vpd_info_field_size(&vpd_data[j]);
13230 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13231 if (j + len > block_end)
13232 goto partno;
13234 memcpy(tp->fw_ver, &vpd_data[j], len);
13235 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13236 }
13238 partno:
13239 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13240 PCI_VPD_RO_KEYWORD_PARTNO);
13241 if (i < 0)
13242 goto out_not_found;
13244 len = pci_vpd_info_field_size(&vpd_data[i]);
13246 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13247 if (len > TG3_BPN_SIZE ||
13248 (len + i) > TG3_NVM_VPD_LEN)
13249 goto out_not_found;
13251 memcpy(tp->board_part_number, &vpd_data[i], len);
13253 out_not_found:
13254 kfree(vpd_data);
13255 if (tp->board_part_number[0])
13256 return;
13258 out_no_vpd:
13259 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13260 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13261 strcpy(tp->board_part_number, "BCM5717");
13262 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13263 strcpy(tp->board_part_number, "BCM5718");
13264 else
13265 goto nomatch;
13266 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13267 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13268 strcpy(tp->board_part_number, "BCM57780");
13269 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13270 strcpy(tp->board_part_number, "BCM57760");
13271 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13272 strcpy(tp->board_part_number, "BCM57790");
13273 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13274 strcpy(tp->board_part_number, "BCM57788");
13275 else
13276 goto nomatch;
13277 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13278 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13279 strcpy(tp->board_part_number, "BCM57761");
13280 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13281 strcpy(tp->board_part_number, "BCM57765");
13282 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13283 strcpy(tp->board_part_number, "BCM57781");
13284 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13285 strcpy(tp->board_part_number, "BCM57785");
13286 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13287 strcpy(tp->board_part_number, "BCM57791");
13288 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13289 strcpy(tp->board_part_number, "BCM57795");
13290 else
13291 goto nomatch;
13292 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13293 strcpy(tp->board_part_number, "BCM95906");
13294 } else {
13295 nomatch:
13296 strcpy(tp->board_part_number, "none");
13297 }
13298 }
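/*
 * VPD parsing above only walks the read-only LRDT section: an MFR_ID
 * field of "1028" (Dell's vendor ID) makes the VENDOR0 keyword be
 * treated as a bootcode version prefix for fw_ver, and PARTNO fills
 * board_part_number.  With no VPD at all, known 5717/57780/57765
 * family device IDs map to hardcoded "BCMxxxxx" names, with "none"
 * as the final fallback.
 */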
13300 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13301 {
13302 u32 val;
13304 if (tg3_nvram_read(tp, offset, &val) ||
13305 (val & 0xfc000000) != 0x0c000000 ||
13306 tg3_nvram_read(tp, offset + 4, &val) ||
13307 val != 0)
13308 return 0;
13310 return 1;
13311 }
13313 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13314 {
13315 u32 val, offset, start, ver_offset;
13316 int i, dst_off;
13317 bool newver = false;
13319 if (tg3_nvram_read(tp, 0xc, &offset) ||
13320 tg3_nvram_read(tp, 0x4, &start))
13321 return;
13323 offset = tg3_nvram_logical_addr(tp, offset);
13325 if (tg3_nvram_read(tp, offset, &val))
13326 return;
13328 if ((val & 0xfc000000) == 0x0c000000) {
13329 if (tg3_nvram_read(tp, offset + 4, &val))
13330 return;
13332 if (val == 0)
13333 newver = true;
13334 }
13336 dst_off = strlen(tp->fw_ver);
13338 if (newver) {
13339 if (TG3_VER_SIZE - dst_off < 16 ||
13340 tg3_nvram_read(tp, offset + 8, &ver_offset))
13341 return;
13343 offset = offset + ver_offset - start;
13344 for (i = 0; i < 16; i += 4) {
13345 __be32 v;
13346 if (tg3_nvram_read_be32(tp, offset + i, &v))
13347 return;
13349 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13350 }
13351 } else {
13352 u32 major, minor;
13354 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13355 return;
13357 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13358 TG3_NVM_BCVER_MAJSFT;
13359 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13360 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13361 "v%d.%02d", major, minor);
13362 }
13363 }
13365 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13366 {
13367 u32 val, major, minor;
13369 /* Use native endian representation */
13370 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13371 return;
13373 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13374 TG3_NVM_HWSB_CFG1_MAJSFT;
13375 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13376 TG3_NVM_HWSB_CFG1_MINSFT;
13378 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13379 }
13381 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13382 {
13383 u32 offset, major, minor, build;
13385 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13387 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13388 return;
13390 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13391 case TG3_EEPROM_SB_REVISION_0:
13392 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13393 break;
13394 case TG3_EEPROM_SB_REVISION_2:
13395 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13396 break;
13397 case TG3_EEPROM_SB_REVISION_3:
13398 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13399 break;
13400 case TG3_EEPROM_SB_REVISION_4:
13401 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13402 break;
13403 case TG3_EEPROM_SB_REVISION_5:
13404 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13405 break;
13406 case TG3_EEPROM_SB_REVISION_6:
13407 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13408 break;
13409 default:
13410 return;
13411 }
13413 if (tg3_nvram_read(tp, offset, &val))
13414 return;
13416 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13417 TG3_EEPROM_SB_EDH_BLD_SHFT;
13418 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13419 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13420 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13422 if (minor > 99 || build > 26)
13423 return;
13425 offset = strlen(tp->fw_ver);
13426 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13427 " v%d.%02d", major, minor);
13429 if (build > 0) {
13430 offset = strlen(tp->fw_ver);
13431 if (offset < TG3_VER_SIZE - 1)
13432 tp->fw_ver[offset] = 'a' + build - 1;
13433 }
13434 }
13436 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13437 {
13438 u32 val, offset, start;
13439 int i, vlen;
13441 for (offset = TG3_NVM_DIR_START;
13442 offset < TG3_NVM_DIR_END;
13443 offset += TG3_NVM_DIRENT_SIZE) {
13444 if (tg3_nvram_read(tp, offset, &val))
13445 return;
13447 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13448 break;
13449 }
13451 if (offset == TG3_NVM_DIR_END)
13452 return;
13454 if (!tg3_flag(tp, 5705_PLUS))
13455 start = 0x08000000;
13456 else if (tg3_nvram_read(tp, offset - 4, &start))
13457 return;
13459 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13460 !tg3_fw_img_is_valid(tp, offset) ||
13461 tg3_nvram_read(tp, offset + 8, &val))
13462 return;
13464 offset += val - start;
13466 vlen = strlen(tp->fw_ver);
13468 tp->fw_ver[vlen++] = ',';
13469 tp->fw_ver[vlen++] = ' ';
13471 for (i = 0; i < 4; i++) {
13472 __be32 v;
13473 if (tg3_nvram_read_be32(tp, offset, &v))
13474 return;
13476 offset += sizeof(v);
13478 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13479 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13480 break;
13481 }
13483 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13484 vlen += sizeof(v);
13485 }
13486 }
13488 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13489 {
13490 int vlen;
13491 u32 apedata;
13492 char *fwtype;
13494 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13495 return;
13497 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13498 if (apedata != APE_SEG_SIG_MAGIC)
13499 return;
13501 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13502 if (!(apedata & APE_FW_STATUS_READY))
13503 return;
13505 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13507 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13508 tg3_flag_set(tp, APE_HAS_NCSI);
13509 fwtype = "NCSI";
13510 } else {
13511 fwtype = "DASH";
13512 }
13514 vlen = strlen(tp->fw_ver);
13516 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13517 fwtype,
13518 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13519 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13520 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13521 (apedata & APE_FW_VERSION_BLDMSK));
13522 }
13524 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13525 {
13526 u32 val;
13527 bool vpd_vers = false;
13529 if (tp->fw_ver[0] != 0)
13530 vpd_vers = true;
13532 if (tg3_flag(tp, NO_NVRAM)) {
13533 strcat(tp->fw_ver, "sb");
13534 return;
13535 }
13537 if (tg3_nvram_read(tp, 0, &val))
13538 return;
13540 if (val == TG3_EEPROM_MAGIC)
13541 tg3_read_bc_ver(tp);
13542 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13543 tg3_read_sb_ver(tp, val);
13544 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13545 tg3_read_hwsb_ver(tp);
13546 else
13547 return;
13549 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13550 goto done;
13552 tg3_read_mgmtfw_ver(tp);
13554 done:
13555 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13556 }
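/*
 * fw_ver assembly order: any VPD-derived prefix comes first, then the
 * NVRAM image version decoded by magic number (bootcode, selfboot or
 * hardware-selfboot format); the management firmware version is only
 * appended when ASF is enabled without APE and no VPD version was
 * found.  tg3_read_dash_ver() adds the DASH/NCSI version separately
 * when APE firmware is active.  The buffer is always NUL-terminated.
 */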
13558 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13560 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13561 {
13562 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13563 return TG3_RX_RET_MAX_SIZE_5717;
13564 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13565 return TG3_RX_RET_MAX_SIZE_5700;
13566 else
13567 return TG3_RX_RET_MAX_SIZE_5705;
13568 }
13570 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13571 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13572 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13573 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13574 { },
13575 };
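/*
 * The bridges listed above (AMD-762, AMD-8131, VIA K8T800) are known
 * to reorder posted writes to the mailbox registers.  When one is
 * present and the NIC is not PCI Express, MBOX_WRITE_REORDER is set
 * below so that every mailbox write is flushed with a read-back.
 */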
13577 static int __devinit tg3_get_invariants(struct tg3 *tp)
13578 {
13579 u32 misc_ctrl_reg;
13580 u32 pci_state_reg, grc_misc_cfg;
13581 u32 val;
13582 u16 pci_cmd;
13583 int err;
13585 /* Force memory write invalidate off. If we leave it on,
13586 * then on 5700_BX chips we have to enable a workaround.
13587 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13588 * to match the cacheline size. The Broadcom driver has this
13589 * workaround but turns MWI off at all times, so it never uses
13590 * it. This seems to suggest that the workaround is insufficient.
13591 */
13592 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13593 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13594 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13596 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13597 * has the register indirect write enable bit set before
13598 * we try to access any of the MMIO registers. It is also
13599 * critical that the PCI-X hw workaround situation is decided
13600 * before that as well.
13601 */
13602 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13603 &misc_ctrl_reg);
13605 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13606 MISC_HOST_CTRL_CHIPREV_SHIFT);
13607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13608 u32 prod_id_asic_rev;
13610 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13611 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13612 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13613 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13614 pci_read_config_dword(tp->pdev,
13615 TG3PCI_GEN2_PRODID_ASICREV,
13616 &prod_id_asic_rev);
13617 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13618 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13619 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13620 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13621 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13622 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13623 pci_read_config_dword(tp->pdev,
13624 TG3PCI_GEN15_PRODID_ASICREV,
13625 &prod_id_asic_rev);
13626 else
13627 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13628 &prod_id_asic_rev);
13630 tp->pci_chip_rev_id = prod_id_asic_rev;
13631 }
13633 /* Wrong chip ID in 5752 A0. This code can be removed later
13634 * as A0 is not in production.
13635 */
13636 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13637 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13639 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13640 * we need to disable memory and use config. cycles
13641 * only to access all registers. The 5702/03 chips
13642 * can mistakenly decode the special cycles from the
13643 * ICH chipsets as memory write cycles, causing corruption
13644 * of register and memory space. Only certain ICH bridges
13645 * will drive special cycles with non-zero data during the
13646 * address phase which can fall within the 5703's address
13647 * range. This is not an ICH bug as the PCI spec allows
13648 * non-zero address during special cycles. However, only
13649 * these ICH bridges are known to drive non-zero addresses
13650 * during special cycles.
13652 * Since special cycles do not cross PCI bridges, we only
13653 * enable this workaround if the 5703 is on the secondary
13654 * bus of these ICH bridges.
13655 */
13656 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13657 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13658 static struct tg3_dev_id {
13659 u32 vendor;
13660 u32 device;
13661 u32 rev;
13662 } ich_chipsets[] = {
13663 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13664 PCI_ANY_ID },
13665 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13666 PCI_ANY_ID },
13667 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13668 0xa },
13669 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13670 PCI_ANY_ID },
13671 { },
13672 };
13673 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13674 struct pci_dev *bridge = NULL;
13676 while (pci_id->vendor != 0) {
13677 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13678 bridge);
13679 if (!bridge) {
13680 pci_id++;
13681 continue;
13682 }
13683 if (pci_id->rev != PCI_ANY_ID) {
13684 if (bridge->revision > pci_id->rev)
13685 continue;
13686 }
13687 if (bridge->subordinate &&
13688 (bridge->subordinate->number ==
13689 tp->pdev->bus->number)) {
13690 tg3_flag_set(tp, ICH_WORKAROUND);
13691 pci_dev_put(bridge);
13692 break;
13693 }
13694 }
13695 }
13697 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13698 static struct tg3_dev_id {
13699 u32 vendor;
13700 u32 device;
13701 } bridge_chipsets[] = {
13702 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13703 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13704 { },
13705 };
13706 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13707 struct pci_dev *bridge = NULL;
13709 while (pci_id->vendor != 0) {
13710 bridge = pci_get_device(pci_id->vendor,
13711 pci_id->device,
13712 bridge);
13713 if (!bridge) {
13714 pci_id++;
13715 continue;
13716 }
13717 if (bridge->subordinate &&
13718 (bridge->subordinate->number <=
13719 tp->pdev->bus->number) &&
13720 (bridge->subordinate->subordinate >=
13721 tp->pdev->bus->number)) {
13722 tg3_flag_set(tp, 5701_DMA_BUG);
13723 pci_dev_put(bridge);
13724 break;
13725 }
13726 }
13727 }
13729 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13730 * DMA addresses > 40-bit. This bridge may have other additional
13731 * 57xx devices behind it in some 4-port NIC designs for example.
13732 * Any tg3 device found behind the bridge will also need the 40-bit
13733 * DMA workaround.
13734 */
13735 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13736 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13737 tg3_flag_set(tp, 5780_CLASS);
13738 tg3_flag_set(tp, 40BIT_DMA_BUG);
13739 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13740 } else {
13741 struct pci_dev *bridge = NULL;
13743 do {
13744 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13745 PCI_DEVICE_ID_SERVERWORKS_EPB,
13746 bridge);
13747 if (bridge && bridge->subordinate &&
13748 (bridge->subordinate->number <=
13749 tp->pdev->bus->number) &&
13750 (bridge->subordinate->subordinate >=
13751 tp->pdev->bus->number)) {
13752 tg3_flag_set(tp, 40BIT_DMA_BUG);
13753 pci_dev_put(bridge);
13754 break;
13755 }
13756 } while (bridge);
13757 }
13759 /* Initialize misc host control in PCI block. */
13760 tp->misc_host_ctrl |= (misc_ctrl_reg &
13761 MISC_HOST_CTRL_CHIPREV);
13762 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13763 tp->misc_host_ctrl);
13765 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13766 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13768 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13769 tp->pdev_peer = tg3_find_peer(tp);
13771 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13772 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13773 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13774 tg3_flag_set(tp, 5717_PLUS);
13776 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13777 tg3_flag(tp, 5717_PLUS))
13778 tg3_flag_set(tp, 57765_PLUS);
13780 /* Intentionally exclude ASIC_REV_5906 */
13781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13782 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13785 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13787 tg3_flag(tp, 57765_PLUS))
13788 tg3_flag_set(tp, 5755_PLUS);
13790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13793 tg3_flag(tp, 5755_PLUS) ||
13794 tg3_flag(tp, 5780_CLASS))
13795 tg3_flag_set(tp, 5750_PLUS);
13797 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13798 tg3_flag(tp, 5750_PLUS))
13799 tg3_flag_set(tp, 5705_PLUS);
13801 /* 5700 B0 chips do not support checksumming correctly due
13802 * to hardware bugs.
13803 */
13804 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13805 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13807 if (tg3_flag(tp, 5755_PLUS))
13808 features |= NETIF_F_IPV6_CSUM;
13809 tp->dev->features |= features;
13810 tp->dev->hw_features |= features;
13811 tp->dev->vlan_features |= features;
13812 }
13814 /* Determine TSO capabilities */
13815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13816 ; /* Do nothing. HW bug. */
13817 else if (tg3_flag(tp, 57765_PLUS))
13818 tg3_flag_set(tp, HW_TSO_3);
13819 else if (tg3_flag(tp, 5755_PLUS) ||
13820 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13821 tg3_flag_set(tp, HW_TSO_2);
13822 else if (tg3_flag(tp, 5750_PLUS)) {
13823 tg3_flag_set(tp, HW_TSO_1);
13824 tg3_flag_set(tp, TSO_BUG);
13825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13826 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13827 tg3_flag_clear(tp, TSO_BUG);
13828 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13829 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13830 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13831 tg3_flag_set(tp, TSO_BUG);
13832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13833 tp->fw_needed = FIRMWARE_TG3TSO5;
13834 else
13835 tp->fw_needed = FIRMWARE_TG3TSO;
13836 }
13838 tp->irq_max = 1;
13840 if (tg3_flag(tp, 5750_PLUS)) {
13841 tg3_flag_set(tp, SUPPORT_MSI);
13842 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13843 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13844 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13845 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13846 tp->pdev_peer == tp->pdev))
13847 tg3_flag_clear(tp, SUPPORT_MSI);
13849 if (tg3_flag(tp, 5755_PLUS) ||
13850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13851 tg3_flag_set(tp, 1SHOT_MSI);
13852 }
13854 if (tg3_flag(tp, 57765_PLUS)) {
13855 tg3_flag_set(tp, SUPPORT_MSIX);
13856 tp->irq_max = TG3_IRQ_MAX_VECS;
13857 }
13858 }
13860 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13861 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13863 tg3_flag_set(tp, SHORT_DMA_BUG);
13864 else if (!tg3_flag(tp, 5755_PLUS)) {
13865 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13866 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13867 }
13869 if (tg3_flag(tp, 5717_PLUS))
13870 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13872 if (tg3_flag(tp, 57765_PLUS) &&
13873 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13874 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13876 if (!tg3_flag(tp, 5705_PLUS) ||
13877 tg3_flag(tp, 5780_CLASS) ||
13878 tg3_flag(tp, USE_JUMBO_BDFLAG))
13879 tg3_flag_set(tp, JUMBO_CAPABLE);
13881 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13882 &pci_state_reg);
13884 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13885 if (tp->pcie_cap != 0) {
13886 u16 lnkctl;
13888 tg3_flag_set(tp, PCI_EXPRESS);
13890 tp->pcie_readrq = 4096;
13891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13892 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13893 tp->pcie_readrq = 2048;
13895 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13897 pci_read_config_word(tp->pdev,
13898 tp->pcie_cap + PCI_EXP_LNKCTL,
13899 &lnkctl);
13900 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13902 tg3_flag_clear(tp, HW_TSO_2);
13903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13905 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13906 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13907 tg3_flag_set(tp, CLKREQ_BUG);
13908 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13909 tg3_flag_set(tp, L1PLLPD_EN);
13910 }
13911 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13912 tg3_flag_set(tp, PCI_EXPRESS);
13913 } else if (!tg3_flag(tp, 5705_PLUS) ||
13914 tg3_flag(tp, 5780_CLASS)) {
13915 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13916 if (!tp->pcix_cap) {
13917 dev_err(&tp->pdev->dev,
13918 "Cannot find PCI-X capability, aborting\n");
13919 return -EIO;
13920 }
13922 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13923 tg3_flag_set(tp, PCIX_MODE);
13924 }
13926 /* If we have an AMD 762 or VIA K8T800 chipset, write
13927 * reordering to the mailbox registers done by the host
13928 * controller can cause major troubles. We read back from
13929 * every mailbox register write to force the writes to be
13930 * posted to the chip in order.
13931 */
13932 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13933 !tg3_flag(tp, PCI_EXPRESS))
13934 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13936 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13937 &tp->pci_cacheline_sz);
13938 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13939 &tp->pci_lat_timer);
13940 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13941 tp->pci_lat_timer < 64) {
13942 tp->pci_lat_timer = 64;
13943 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13944 tp->pci_lat_timer);
13945 }
13947 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13948 /* 5700 BX chips need to have their TX producer index
13949 * mailboxes written twice to work around a bug.
13950 */
13951 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13953 /* If we are in PCI-X mode, enable register write workaround.
13955 * The workaround is to use indirect register accesses
13956 * for all chip writes not to mailbox registers.
13957 */
13958 if (tg3_flag(tp, PCIX_MODE)) {
13959 u32 pm_reg;
13961 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13963 /* The chip can have its power management PCI config
13964 * space registers clobbered due to this bug.
13965 * So explicitly force the chip into D0 here.
13966 */
13967 pci_read_config_dword(tp->pdev,
13968 tp->pm_cap + PCI_PM_CTRL,
13969 &pm_reg);
13970 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13971 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13972 pci_write_config_dword(tp->pdev,
13973 tp->pm_cap + PCI_PM_CTRL,
13974 pm_reg);
13976 /* Also, force SERR#/PERR# in PCI command. */
13977 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13978 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13979 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13980 }
13981 }
13983 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13984 tg3_flag_set(tp, PCI_HIGH_SPEED);
13985 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13986 tg3_flag_set(tp, PCI_32BIT);
13988 /* Chip-specific fixup from Broadcom driver */
13989 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13990 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13991 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13992 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13993 }
13995 /* Default fast path register access methods */
13996 tp->read32 = tg3_read32;
13997 tp->write32 = tg3_write32;
13998 tp->read32_mbox = tg3_read32;
13999 tp->write32_mbox = tg3_write32;
14000 tp->write32_tx_mbox = tg3_write32;
14001 tp->write32_rx_mbox = tg3_write32;
14003 /* Various workaround register access methods */
14004 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14005 tp->write32 = tg3_write_indirect_reg32;
14006 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14007 (tg3_flag(tp, PCI_EXPRESS) &&
14008 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14009 /*
14010 * Back to back register writes can cause problems on these
14011 * chips, the workaround is to read back all reg writes
14012 * except those to mailbox regs.
14014 * See tg3_write_indirect_reg32().
14015 */
14016 tp->write32 = tg3_write_flush_reg32;
14017 }
14019 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14020 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14021 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14022 tp->write32_rx_mbox = tg3_write_flush_reg32;
14023 }
14025 if (tg3_flag(tp, ICH_WORKAROUND)) {
14026 tp->read32 = tg3_read_indirect_reg32;
14027 tp->write32 = tg3_write_indirect_reg32;
14028 tp->read32_mbox = tg3_read_indirect_mbox;
14029 tp->write32_mbox = tg3_write_indirect_mbox;
14030 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14031 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14033 iounmap(tp->regs);
14034 tp->regs = NULL;
14036 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14037 pci_cmd &= ~PCI_COMMAND_MEMORY;
14038 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14039 }
14040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14041 tp->read32_mbox = tg3_read32_mbox_5906;
14042 tp->write32_mbox = tg3_write32_mbox_5906;
14043 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14044 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14045 }
14047 if (tp->write32 == tg3_write_indirect_reg32 ||
14048 (tg3_flag(tp, PCIX_MODE) &&
14049 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14051 tg3_flag_set(tp, SRAM_USE_CONFIG);
14053 /* Get eeprom hw config before calling tg3_set_power_state().
14054 * In particular, the TG3_FLAG_IS_NIC flag must be
14055 * determined before calling tg3_set_power_state() so that
14056 * we know whether or not to switch out of Vaux power.
14057 * When the flag is set, it means that GPIO1 is used for eeprom
14058 * write protect and also implies that it is a LOM where GPIOs
14059 * are not used to switch power.
14060 */
14061 tg3_get_eeprom_hw_cfg(tp);
14063 if (tg3_flag(tp, ENABLE_APE)) {
14064 /* Allow reads and writes to the
14065 * APE register and memory space.
14066 */
14067 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14068 PCISTATE_ALLOW_APE_SHMEM_WR |
14069 PCISTATE_ALLOW_APE_PSPACE_WR;
14070 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14071 pci_state_reg);
14072 }
14074 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14075 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14076 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14077 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14078 tg3_flag(tp, 57765_PLUS))
14079 tg3_flag_set(tp, CPMU_PRESENT);
14081 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
14082 * GPIO1 driven high will bring 5700's external PHY out of reset.
14083 * It is also used as eeprom write protect on LOMs.
14084 */
14085 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14086 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
14087 tg3_flag(tp, EEPROM_WRITE_PROT))
14088 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14089 GRC_LCLCTRL_GPIO_OUTPUT1);
14090 /* Unused GPIO3 must be driven as output on 5752 because there
14091 * are no pull-up resistors on unused GPIO pins.
14092 */
14093 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14094 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14099 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14101 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14103 /* Turn off the debug UART. */
14104 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14105 if (tg3_flag(tp, IS_NIC))
14106 /* Keep VMain power. */
14107 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14108 GRC_LCLCTRL_GPIO_OUTPUT0;
14111 /* Force the chip into D0. */
14112 err = tg3_power_up(tp);
14113 if (err) {
14114 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
14115 return err;
14118 /* Derive initial jumbo mode from MTU assigned in
14119 * ether_setup() via the alloc_etherdev() call
14120 */
14121 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14122 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14124 /* Determine WakeOnLan speed to use. */
14125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14126 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14127 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14128 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14129 tg3_flag_clear(tp, WOL_SPEED_100MB);
14130 } else {
14131 tg3_flag_set(tp, WOL_SPEED_100MB);
14134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14135 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14137 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14138 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
14139 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
14140 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14141 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14142 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14143 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14144 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14146 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14147 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14148 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14149 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14150 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14152 if (tg3_flag(tp, 5705_PLUS) &&
14153 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14154 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14155 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14156 !tg3_flag(tp, 57765_PLUS)) {
14157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14158 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14159 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14161 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14162 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14163 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14164 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14165 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14166 } else
14167 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14171 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14172 tp->phy_otp = tg3_read_otp_phycfg(tp);
14173 if (tp->phy_otp == 0)
14174 tp->phy_otp = TG3_OTP_DEFAULT;
14177 if (tg3_flag(tp, CPMU_PRESENT))
14178 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14179 else
14180 tp->mi_mode = MAC_MI_MODE_BASE;
14182 tp->coalesce_mode = 0;
14183 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14184 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14185 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14187 /* Set these bits to enable statistics workaround. */
14188 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14189 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14190 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14191 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14192 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14197 tg3_flag_set(tp, USE_PHYLIB);
14199 err = tg3_mdio_init(tp);
14200 if (err)
14201 return err;
14203 /* Initialize data/descriptor byte/word swapping. */
14204 val = tr32(GRC_MODE);
14205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14206 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14207 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14208 GRC_MODE_B2HRX_ENABLE |
14209 GRC_MODE_HTX2B_ENABLE |
14210 GRC_MODE_HOST_STACKUP);
14211 else
14212 val &= GRC_MODE_HOST_STACKUP;
14214 tw32(GRC_MODE, val | tp->grc_mode);
14216 tg3_switch_clocks(tp);
14218 /* Clear this out for sanity. */
14219 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14221 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14222 &pci_state_reg);
14223 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14224 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14225 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14227 if (chiprevid == CHIPREV_ID_5701_A0 ||
14228 chiprevid == CHIPREV_ID_5701_B0 ||
14229 chiprevid == CHIPREV_ID_5701_B2 ||
14230 chiprevid == CHIPREV_ID_5701_B5) {
14231 void __iomem *sram_base;
14233 /* Write some dummy words into the SRAM status block
14234 * area, see if it reads back correctly. If the return
14235 * value is bad, force enable the PCIX workaround.
14236 */
14237 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14239 writel(0x00000000, sram_base);
14240 writel(0x00000000, sram_base + 4);
14241 writel(0xffffffff, sram_base + 4);
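/* If the write to the adjacent word bled back into the first
 * word, target-mode writes are being corrupted.
 */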
14242 if (readl(sram_base) != 0x00000000)
14243 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14247 udelay(50);
14248 tg3_nvram_init(tp);
14250 grc_misc_cfg = tr32(GRC_MISC_CFG);
14251 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14253 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14254 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14255 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14256 tg3_flag_set(tp, IS_5788);
14258 if (!tg3_flag(tp, IS_5788) &&
14259 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14260 tg3_flag_set(tp, TAGGED_STATUS);
14261 if (tg3_flag(tp, TAGGED_STATUS)) {
14262 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14263 HOSTCC_MODE_CLRTICK_TXBD);
14265 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14266 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14267 tp->misc_host_ctrl);
14270 /* Preserve the APE MAC_MODE bits */
14271 if (tg3_flag(tp, ENABLE_APE))
14272 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14273 else
14274 tp->mac_mode = TG3_DEF_MAC_MODE;
14276 /* these are limited to 10/100 only */
14277 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14278 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14279 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14280 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14281 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14282 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14283 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14284 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14285 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14286 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14287 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14288 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14289 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14290 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14291 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14292 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14294 err = tg3_phy_probe(tp);
14295 if (err) {
14296 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14297 /* ... but do not return immediately ... */
14298 tg3_mdio_fini(tp);
14301 tg3_read_vpd(tp);
14302 tg3_read_fw_ver(tp);
14304 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14305 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14306 } else {
14307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14308 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14309 else
14310 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14313 /* 5700 {AX,BX} chips have a broken status block link
14314 * change bit implementation, so we must use the
14315 * status register in those cases.
14316 */
14317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14318 tg3_flag_set(tp, USE_LINKCHG_REG);
14319 else
14320 tg3_flag_clear(tp, USE_LINKCHG_REG);
14322 /* The led_ctrl is set during tg3_phy_probe; here we might
14323 * have to force the link status polling mechanism based
14324 * upon subsystem IDs.
14325 */
14326 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14327 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14328 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14329 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14330 tg3_flag_set(tp, USE_LINKCHG_REG);
14333 /* For all SERDES we poll the MAC status register. */
14334 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14335 tg3_flag_set(tp, POLL_SERDES);
14336 else
14337 tg3_flag_clear(tp, POLL_SERDES);
14339 tp->rx_offset = NET_IP_ALIGN;
14340 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
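/* The 5701 in PCI-X mode cannot DMA to 2-byte-aligned rx buffers
 * (a hardware erratum), so drop the usual NET_IP_ALIGN pad; on
 * arches without efficient unaligned access, max out the copy
 * threshold so every received packet is copied to an aligned buffer.
 */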
14341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14342 tg3_flag(tp, PCIX_MODE)) {
14343 tp->rx_offset = 0;
14344 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14345 tp->rx_copy_thresh = ~(u16)0;
14346 #endif
14349 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14350 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14351 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14353 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14355 /* Increment the rx prod index on the rx std ring by at most
14356 * 8 for these chips to work around hw errata.
14357 */
14358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14359 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14360 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14361 tp->rx_std_max_post = 8;
14363 if (tg3_flag(tp, ASPM_WORKAROUND))
14364 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14365 PCIE_PWR_MGMT_L1_THRESH_MSK;
14367 return err;
14370 #ifdef CONFIG_SPARC
14371 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14373 struct net_device *dev = tp->dev;
14374 struct pci_dev *pdev = tp->pdev;
14375 struct device_node *dp = pci_device_to_OF_node(pdev);
14376 const unsigned char *addr;
14377 int len;
14379 addr = of_get_property(dp, "local-mac-address", &len);
14380 if (addr && len == 6) {
14381 memcpy(dev->dev_addr, addr, 6);
14382 memcpy(dev->perm_addr, dev->dev_addr, 6);
14383 return 0;
14385 return -ENODEV;
14388 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14390 struct net_device *dev = tp->dev;
14392 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14393 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14394 return 0;
14396 #endif
14398 static int __devinit tg3_get_device_address(struct tg3 *tp)
14400 struct net_device *dev = tp->dev;
14401 u32 hi, lo, mac_offset;
14402 int addr_ok = 0;
14404 #ifdef CONFIG_SPARC
14405 if (!tg3_get_macaddr_sparc(tp))
14406 return 0;
14407 #endif
14409 mac_offset = 0x7c;
14410 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14411 tg3_flag(tp, 5780_CLASS)) {
14412 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14413 mac_offset = 0xcc;
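/* If the NVRAM lock cannot be taken (bootcode may still hold it),
 * reset the NVRAM state machine; on success just release the
 * lock again.
 */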
14414 if (tg3_nvram_lock(tp))
14415 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14416 else
14417 tg3_nvram_unlock(tp);
14418 } else if (tg3_flag(tp, 5717_PLUS)) {
14419 if (PCI_FUNC(tp->pdev->devfn) & 1)
14420 mac_offset = 0xcc;
14421 if (PCI_FUNC(tp->pdev->devfn) > 1)
14422 mac_offset += 0x18c;
14423 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14424 mac_offset = 0x10;
14426 /* First try to get it from MAC address mailbox. */
14427 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
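/* 0x484b is ASCII "HK"; the driver treats it as the bootcode's
 * "valid MAC address present" signature.
 */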
14428 if ((hi >> 16) == 0x484b) {
14429 dev->dev_addr[0] = (hi >> 8) & 0xff;
14430 dev->dev_addr[1] = (hi >> 0) & 0xff;
14432 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14433 dev->dev_addr[2] = (lo >> 24) & 0xff;
14434 dev->dev_addr[3] = (lo >> 16) & 0xff;
14435 dev->dev_addr[4] = (lo >> 8) & 0xff;
14436 dev->dev_addr[5] = (lo >> 0) & 0xff;
14438 /* Some old bootcode may report a 0 MAC address in SRAM */
14439 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14441 if (!addr_ok) {
14442 /* Next, try NVRAM. */
14443 if (!tg3_flag(tp, NO_NVRAM) &&
14444 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14445 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14446 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14447 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14449 /* Finally just fetch it out of the MAC control regs. */
14450 else {
14451 hi = tr32(MAC_ADDR_0_HIGH);
14452 lo = tr32(MAC_ADDR_0_LOW);
14454 dev->dev_addr[5] = lo & 0xff;
14455 dev->dev_addr[4] = (lo >> 8) & 0xff;
14456 dev->dev_addr[3] = (lo >> 16) & 0xff;
14457 dev->dev_addr[2] = (lo >> 24) & 0xff;
14458 dev->dev_addr[1] = hi & 0xff;
14459 dev->dev_addr[0] = (hi >> 8) & 0xff;
14463 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14464 #ifdef CONFIG_SPARC
14465 if (!tg3_get_default_macaddr_sparc(tp))
14466 return 0;
14467 #endif
14468 return -EINVAL;
14470 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14471 return 0;
14474 #define BOUNDARY_SINGLE_CACHELINE 1
14475 #define BOUNDARY_MULTI_CACHELINE 2
14477 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14479 int cacheline_size;
14480 u8 byte;
14481 int goal;
14483 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
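/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the *4
 * below; a value of 0 (unset) is treated as a 1024-byte line so
 * the boundary switches below fall through to their defaults.
 */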
14484 if (byte == 0)
14485 cacheline_size = 1024;
14486 else
14487 cacheline_size = (int) byte * 4;
14489 /* On 5703 and later chips, the boundary bits have no
14490 * effect.
14491 */
14492 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14493 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14494 !tg3_flag(tp, PCI_EXPRESS))
14495 goto out;
14497 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14498 goal = BOUNDARY_MULTI_CACHELINE;
14499 #else
14500 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14501 goal = BOUNDARY_SINGLE_CACHELINE;
14502 #else
14503 goal = 0;
14504 #endif
14505 #endif
14507 if (tg3_flag(tp, 57765_PLUS)) {
14508 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14509 goto out;
14512 if (!goal)
14513 goto out;
14515 /* PCI controllers on most RISC systems tend to disconnect
14516 * when a device tries to burst across a cache-line boundary.
14517 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14518 *
14519 * Unfortunately, for PCI-E there are only limited
14520 * write-side controls for this, and thus for reads
14521 * we will still get the disconnects. We'll also waste
14522 * these PCI cycles for both read and write for chips
14523 * other than 5700 and 5701 which do not implement the
14524 * boundary bits.
14525 */
14526 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14527 switch (cacheline_size) {
14528 case 16:
14529 case 32:
14530 case 64:
14531 case 128:
14532 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14533 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14534 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14535 } else {
14536 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14537 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14539 break;
14541 case 256:
14542 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14543 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14544 break;
14546 default:
14547 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14548 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14549 break;
14551 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14552 switch (cacheline_size) {
14553 case 16:
14554 case 32:
14555 case 64:
14556 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14557 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14558 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14559 break;
14561 /* fallthrough */
14562 case 128:
14563 default:
14564 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14565 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14566 break;
14568 } else {
14569 switch (cacheline_size) {
14570 case 16:
14571 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14572 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14573 DMA_RWCTRL_WRITE_BNDRY_16);
14574 break;
14576 /* fallthrough */
14577 case 32:
14578 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14579 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14580 DMA_RWCTRL_WRITE_BNDRY_32);
14581 break;
14583 /* fallthrough */
14584 case 64:
14585 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14586 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14587 DMA_RWCTRL_WRITE_BNDRY_64);
14588 break;
14590 /* fallthrough */
14591 case 128:
14592 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14593 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14594 DMA_RWCTRL_WRITE_BNDRY_128);
14595 break;
14597 /* fallthrough */
14598 case 256:
14599 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14600 DMA_RWCTRL_WRITE_BNDRY_256);
14601 break;
14602 case 512:
14603 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14604 DMA_RWCTRL_WRITE_BNDRY_512);
14605 break;
14606 case 1024:
14607 default:
14608 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14609 DMA_RWCTRL_WRITE_BNDRY_1024);
14610 break;
14614 out:
14615 return val;
14618 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14620 struct tg3_internal_buffer_desc test_desc;
14621 u32 sram_dma_descs;
14622 int i, ret;
14624 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14626 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14627 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14628 tw32(RDMAC_STATUS, 0);
14629 tw32(WDMAC_STATUS, 0);
14631 tw32(BUFMGR_MODE, 0);
14632 tw32(FTQ_RESET, 0);
14634 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14635 test_desc.addr_lo = buf_dma & 0xffffffff;
14636 test_desc.nic_mbuf = 0x00002100;
14637 test_desc.len = size;
14639 /*
14640 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14641 * the *second* time the tg3 driver was getting loaded after an
14642 * initial scan.
14643 *
14644 * Broadcom tells me:
14645 * ...the DMA engine is connected to the GRC block and a DMA
14646 * reset may affect the GRC block in some unpredictable way...
14647 * The behavior of resets to individual blocks has not been tested.
14648 *
14649 * Broadcom noted the GRC reset will also reset all sub-components.
14650 */
14651 if (to_device) {
14652 test_desc.cqid_sqid = (13 << 8) | 2;
14654 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14655 udelay(40);
14656 } else {
14657 test_desc.cqid_sqid = (16 << 8) | 7;
14659 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14660 udelay(40);
14662 test_desc.flags = 0x00000005;
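/* Copy the test descriptor into NIC SRAM one 32-bit word at a
 * time through the PCI config-space memory window, then close
 * the window again.
 */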
14664 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14665 u32 val;
14667 val = *(((u32 *)&test_desc) + i);
14668 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14669 sram_dma_descs + (i * sizeof(u32)));
14670 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14672 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14674 if (to_device)
14675 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14676 else
14677 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
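/* Poll the completion FIFO (up to 40 * 100us) for our descriptor
 * address to appear, indicating the DMA transfer finished.
 */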
14679 ret = -ENODEV;
14680 for (i = 0; i < 40; i++) {
14681 u32 val;
14683 if (to_device)
14684 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14685 else
14686 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14687 if ((val & 0xffff) == sram_dma_descs) {
14688 ret = 0;
14689 break;
14692 udelay(100);
14695 return ret;
14698 #define TEST_BUFFER_SIZE 0x2000
14700 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14701 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14702 { },
14705 static int __devinit tg3_test_dma(struct tg3 *tp)
14707 dma_addr_t buf_dma;
14708 u32 *buf, saved_dma_rwctrl;
14709 int ret = 0;
14711 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14712 &buf_dma, GFP_KERNEL);
14713 if (!buf) {
14714 ret = -ENOMEM;
14715 goto out_nofree;
14718 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14719 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
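/* The 0x7/0x6 programmed above are the PCI Memory Write and
 * Memory Read bus command codes; the code below ORs in
 * chip-specific DMA watermarks.
 */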
14721 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14723 if (tg3_flag(tp, 57765_PLUS))
14724 goto out;
14726 if (tg3_flag(tp, PCI_EXPRESS)) {
14727 /* DMA read watermark not used on PCIE */
14728 tp->dma_rwctrl |= 0x00180000;
14729 } else if (!tg3_flag(tp, PCIX_MODE)) {
14730 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14731 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14732 tp->dma_rwctrl |= 0x003f0000;
14733 else
14734 tp->dma_rwctrl |= 0x003f000f;
14735 } else {
14736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14737 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14738 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14739 u32 read_water = 0x7;
14741 /* If the 5704 is behind the EPB bridge, we can
14742 * do the less restrictive ONE_DMA workaround for
14743 * better performance.
14744 */
14745 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14747 tp->dma_rwctrl |= 0x8000;
14748 else if (ccval == 0x6 || ccval == 0x7)
14749 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14751 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14752 read_water = 4;
14753 /* Set bit 23 to enable PCIX hw bug fix */
14754 tp->dma_rwctrl |=
14755 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14756 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14757 (1 << 23);
14758 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14759 /* 5780 always in PCIX mode */
14760 tp->dma_rwctrl |= 0x00144000;
14761 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14762 /* 5714 always in PCIX mode */
14763 tp->dma_rwctrl |= 0x00148000;
14764 } else {
14765 tp->dma_rwctrl |= 0x001b000f;
14769 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14770 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14771 tp->dma_rwctrl &= 0xfffffff0;
14773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14774 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14775 /* Remove this if it causes problems for some boards. */
14776 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14778 /* On 5700/5701 chips, we need to set this bit.
14779 * Otherwise the chip will issue cacheline transactions
14780 * to streamable DMA memory with not all the byte
14781 * enables turned on. This is an error on several
14782 * RISC PCI controllers, in particular sparc64.
14783 *
14784 * On 5703/5704 chips, this bit has been reassigned
14785 * a different meaning. In particular, it is used
14786 * on those chips to enable a PCI-X workaround.
14787 */
14788 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14791 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14793 #if 0
14794 /* Unneeded, already done by tg3_get_invariants. */
14795 tg3_switch_clocks(tp);
14796 #endif
14798 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14799 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14800 goto out;
14802 /* It is best to perform DMA test with maximum write burst size
14803 * to expose the 5700/5701 write DMA bug.
14804 */
14805 saved_dma_rwctrl = tp->dma_rwctrl;
14806 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14807 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
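/* Fill the buffer with a known pattern, DMA it to the card and
 * back, and verify. On corruption, clamp the write boundary to
 * 16 bytes and retry; if it still fails, report -ENODEV.
 */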
14809 while (1) {
14810 u32 *p = buf, i;
14812 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14813 p[i] = i;
14815 /* Send the buffer to the chip. */
14816 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14817 if (ret) {
14818 dev_err(&tp->pdev->dev,
14819 "%s: Buffer write failed. err = %d\n",
14820 __func__, ret);
14821 break;
14824 #if 0
14825 /* validate data reached card RAM correctly. */
14826 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14827 u32 val;
14828 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14829 if (le32_to_cpu(val) != p[i]) {
14830 dev_err(&tp->pdev->dev,
14831 "%s: Buffer corrupted on device! "
14832 "(%d != %d)\n", __func__, val, i);
14833 /* ret = -ENODEV here? */
14835 p[i] = 0;
14837 #endif
14838 /* Now read it back. */
14839 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14840 if (ret) {
14841 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14842 "err = %d\n", __func__, ret);
14843 break;
14846 /* Verify it. */
14847 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14848 if (p[i] == i)
14849 continue;
14851 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14852 DMA_RWCTRL_WRITE_BNDRY_16) {
14853 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14854 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14855 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14856 break;
14857 } else {
14858 dev_err(&tp->pdev->dev,
14859 "%s: Buffer corrupted on read back! "
14860 "(%d != %d)\n", __func__, p[i], i);
14861 ret = -ENODEV;
14862 goto out;
14866 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14867 /* Success. */
14868 ret = 0;
14869 break;
14872 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14873 DMA_RWCTRL_WRITE_BNDRY_16) {
14874 /* DMA test passed without adjusting DMA boundary,
14875 * now look for chipsets that are known to expose the
14876 * DMA bug without failing the test.
14877 */
14878 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14879 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14880 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14881 } else {
14882 /* Safe to use the calculated DMA boundary. */
14883 tp->dma_rwctrl = saved_dma_rwctrl;
14886 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14889 out:
14890 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14891 out_nofree:
14892 return ret;
14895 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14897 if (tg3_flag(tp, 57765_PLUS)) {
14898 tp->bufmgr_config.mbuf_read_dma_low_water =
14899 DEFAULT_MB_RDMA_LOW_WATER_5705;
14900 tp->bufmgr_config.mbuf_mac_rx_low_water =
14901 DEFAULT_MB_MACRX_LOW_WATER_57765;
14902 tp->bufmgr_config.mbuf_high_water =
14903 DEFAULT_MB_HIGH_WATER_57765;
14905 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14906 DEFAULT_MB_RDMA_LOW_WATER_5705;
14907 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14908 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14909 tp->bufmgr_config.mbuf_high_water_jumbo =
14910 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14911 } else if (tg3_flag(tp, 5705_PLUS)) {
14912 tp->bufmgr_config.mbuf_read_dma_low_water =
14913 DEFAULT_MB_RDMA_LOW_WATER_5705;
14914 tp->bufmgr_config.mbuf_mac_rx_low_water =
14915 DEFAULT_MB_MACRX_LOW_WATER_5705;
14916 tp->bufmgr_config.mbuf_high_water =
14917 DEFAULT_MB_HIGH_WATER_5705;
14918 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14919 tp->bufmgr_config.mbuf_mac_rx_low_water =
14920 DEFAULT_MB_MACRX_LOW_WATER_5906;
14921 tp->bufmgr_config.mbuf_high_water =
14922 DEFAULT_MB_HIGH_WATER_5906;
14925 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14926 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14927 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14928 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14929 tp->bufmgr_config.mbuf_high_water_jumbo =
14930 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14931 } else {
14932 tp->bufmgr_config.mbuf_read_dma_low_water =
14933 DEFAULT_MB_RDMA_LOW_WATER;
14934 tp->bufmgr_config.mbuf_mac_rx_low_water =
14935 DEFAULT_MB_MACRX_LOW_WATER;
14936 tp->bufmgr_config.mbuf_high_water =
14937 DEFAULT_MB_HIGH_WATER;
14939 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14940 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14941 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14942 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14943 tp->bufmgr_config.mbuf_high_water_jumbo =
14944 DEFAULT_MB_HIGH_WATER_JUMBO;
14947 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14948 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14951 static char * __devinit tg3_phy_string(struct tg3 *tp)
14953 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14954 case TG3_PHY_ID_BCM5400: return "5400";
14955 case TG3_PHY_ID_BCM5401: return "5401";
14956 case TG3_PHY_ID_BCM5411: return "5411";
14957 case TG3_PHY_ID_BCM5701: return "5701";
14958 case TG3_PHY_ID_BCM5703: return "5703";
14959 case TG3_PHY_ID_BCM5704: return "5704";
14960 case TG3_PHY_ID_BCM5705: return "5705";
14961 case TG3_PHY_ID_BCM5750: return "5750";
14962 case TG3_PHY_ID_BCM5752: return "5752";
14963 case TG3_PHY_ID_BCM5714: return "5714";
14964 case TG3_PHY_ID_BCM5780: return "5780";
14965 case TG3_PHY_ID_BCM5755: return "5755";
14966 case TG3_PHY_ID_BCM5787: return "5787";
14967 case TG3_PHY_ID_BCM5784: return "5784";
14968 case TG3_PHY_ID_BCM5756: return "5722/5756";
14969 case TG3_PHY_ID_BCM5906: return "5906";
14970 case TG3_PHY_ID_BCM5761: return "5761";
14971 case TG3_PHY_ID_BCM5718C: return "5718C";
14972 case TG3_PHY_ID_BCM5718S: return "5718S";
14973 case TG3_PHY_ID_BCM57765: return "57765";
14974 case TG3_PHY_ID_BCM5719C: return "5719C";
14975 case TG3_PHY_ID_BCM5720C: return "5720C";
14976 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14977 case 0: return "serdes";
14978 default: return "unknown";
14982 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14984 if (tg3_flag(tp, PCI_EXPRESS)) {
14985 strcpy(str, "PCI Express");
14986 return str;
14987 } else if (tg3_flag(tp, PCIX_MODE)) {
14988 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14990 strcpy(str, "PCIX:");
14992 if ((clock_ctrl == 7) ||
14993 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14994 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14995 strcat(str, "133MHz");
14996 else if (clock_ctrl == 0)
14997 strcat(str, "33MHz");
14998 else if (clock_ctrl == 2)
14999 strcat(str, "50MHz");
15000 else if (clock_ctrl == 4)
15001 strcat(str, "66MHz");
15002 else if (clock_ctrl == 6)
15003 strcat(str, "100MHz");
15004 } else {
15005 strcpy(str, "PCI:");
15006 if (tg3_flag(tp, PCI_HIGH_SPEED))
15007 strcat(str, "66MHz");
15008 else
15009 strcat(str, "33MHz");
15011 if (tg3_flag(tp, PCI_32BIT))
15012 strcat(str, ":32-bit");
15013 else
15014 strcat(str, ":64-bit");
15015 return str;
15018 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15020 struct pci_dev *peer;
15021 unsigned int func, devnr = tp->pdev->devfn & ~7;
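/* devfn & ~7 masks off the function number; scan all eight
 * functions in this slot for the other port of a dual-port chip.
 */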
15023 for (func = 0; func < 8; func++) {
15024 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15025 if (peer && peer != tp->pdev)
15026 break;
15027 pci_dev_put(peer);
15029 /* 5704 can be configured in single-port mode, set peer to
15030 * tp->pdev in that case.
15031 */
15032 if (!peer) {
15033 peer = tp->pdev;
15034 return peer;
15035 }
15037 /*
15038 * We don't need to keep the refcount elevated; there's no way
15039 * to remove one half of this device without removing the other.
15040 */
15041 pci_dev_put(peer);
15043 return peer;
15046 static void __devinit tg3_init_coal(struct tg3 *tp)
15048 struct ethtool_coalesce *ec = &tp->coal;
15050 memset(ec, 0, sizeof(*ec));
15051 ec->cmd = ETHTOOL_GCOALESCE;
15052 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15053 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15054 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15055 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15056 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15057 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15058 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15059 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15060 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15062 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15063 HOSTCC_MODE_CLRTICK_TXBD)) {
15064 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15065 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15066 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15067 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15070 if (tg3_flag(tp, 5705_PLUS)) {
15071 ec->rx_coalesce_usecs_irq = 0;
15072 ec->tx_coalesce_usecs_irq = 0;
15073 ec->stats_block_coalesce_usecs = 0;
15077 static const struct net_device_ops tg3_netdev_ops = {
15078 .ndo_open = tg3_open,
15079 .ndo_stop = tg3_close,
15080 .ndo_start_xmit = tg3_start_xmit,
15081 .ndo_get_stats64 = tg3_get_stats64,
15082 .ndo_validate_addr = eth_validate_addr,
15083 .ndo_set_multicast_list = tg3_set_rx_mode,
15084 .ndo_set_mac_address = tg3_set_mac_addr,
15085 .ndo_do_ioctl = tg3_ioctl,
15086 .ndo_tx_timeout = tg3_tx_timeout,
15087 .ndo_change_mtu = tg3_change_mtu,
15088 .ndo_fix_features = tg3_fix_features,
15089 .ndo_set_features = tg3_set_features,
15090 #ifdef CONFIG_NET_POLL_CONTROLLER
15091 .ndo_poll_controller = tg3_poll_controller,
15092 #endif
15095 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
15096 .ndo_open = tg3_open,
15097 .ndo_stop = tg3_close,
15098 .ndo_start_xmit = tg3_start_xmit_dma_bug,
15099 .ndo_get_stats64 = tg3_get_stats64,
15100 .ndo_validate_addr = eth_validate_addr,
15101 .ndo_set_multicast_list = tg3_set_rx_mode,
15102 .ndo_set_mac_address = tg3_set_mac_addr,
15103 .ndo_do_ioctl = tg3_ioctl,
15104 .ndo_tx_timeout = tg3_tx_timeout,
15105 .ndo_change_mtu = tg3_change_mtu,
15106 .ndo_set_features = tg3_set_features,
15107 #ifdef CONFIG_NET_POLL_CONTROLLER
15108 .ndo_poll_controller = tg3_poll_controller,
15109 #endif
15112 static int __devinit tg3_init_one(struct pci_dev *pdev,
15113 const struct pci_device_id *ent)
15115 struct net_device *dev;
15116 struct tg3 *tp;
15117 int i, err, pm_cap;
15118 u32 sndmbx, rcvmbx, intmbx;
15119 char str[40];
15120 u64 dma_mask, persist_dma_mask;
15121 u32 hw_features = 0;
15123 printk_once(KERN_INFO "%s\n", version);
15125 err = pci_enable_device(pdev);
15126 if (err) {
15127 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15128 return err;
15131 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15132 if (err) {
15133 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15134 goto err_out_disable_pdev;
15137 pci_set_master(pdev);
15139 /* Find power-management capability. */
15140 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15141 if (pm_cap == 0) {
15142 dev_err(&pdev->dev,
15143 "Cannot find Power Management capability, aborting\n");
15144 err = -EIO;
15145 goto err_out_free_res;
15148 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15149 if (!dev) {
15150 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15151 err = -ENOMEM;
15152 goto err_out_free_res;
15155 SET_NETDEV_DEV(dev, &pdev->dev);
15157 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15159 tp = netdev_priv(dev);
15160 tp->pdev = pdev;
15161 tp->dev = dev;
15162 tp->pm_cap = pm_cap;
15163 tp->rx_mode = TG3_DEF_RX_MODE;
15164 tp->tx_mode = TG3_DEF_TX_MODE;
15166 if (tg3_debug > 0)
15167 tp->msg_enable = tg3_debug;
15168 else
15169 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15171 /* The word/byte swap controls here control register access byte
15172 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15173 * setting below.
15174 */
15175 tp->misc_host_ctrl =
15176 MISC_HOST_CTRL_MASK_PCI_INT |
15177 MISC_HOST_CTRL_WORD_SWAP |
15178 MISC_HOST_CTRL_INDIR_ACCESS |
15179 MISC_HOST_CTRL_PCISTATE_RW;
15181 /* The NONFRM (non-frame) byte/word swap controls take effect
15182 * on descriptor entries, anything which isn't packet data.
15183 *
15184 * The StrongARM chips on the board (one for tx, one for rx)
15185 * are running in big-endian mode.
15186 */
15187 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15188 GRC_MODE_WSWAP_NONFRM_DATA);
15189 #ifdef __BIG_ENDIAN
15190 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15191 #endif
15192 spin_lock_init(&tp->lock);
15193 spin_lock_init(&tp->indirect_lock);
15194 INIT_WORK(&tp->reset_task, tg3_reset_task);
15196 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15197 if (!tp->regs) {
15198 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15199 err = -ENOMEM;
15200 goto err_out_free_dev;
15203 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15204 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15206 dev->ethtool_ops = &tg3_ethtool_ops;
15207 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15208 dev->irq = pdev->irq;
15210 err = tg3_get_invariants(tp);
15211 if (err) {
15212 dev_err(&pdev->dev,
15213 "Problem fetching invariants of chip, aborting\n");
15214 goto err_out_iounmap;
15217 if (tg3_flag(tp, 5755_PLUS) && !tg3_flag(tp, 5717_PLUS))
15218 dev->netdev_ops = &tg3_netdev_ops;
15219 else
15220 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
15223 /* The EPB bridge inside 5714, 5715, and 5780 and any
15224 * device behind the EPB cannot support DMA addresses > 40-bit.
15225 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15226 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15227 * do DMA address check in tg3_start_xmit().
15228 */
15229 if (tg3_flag(tp, IS_5788))
15230 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15231 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15232 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15233 #ifdef CONFIG_HIGHMEM
15234 dma_mask = DMA_BIT_MASK(64);
15235 #endif
15236 } else
15237 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15239 /* Configure DMA attributes. */
15240 if (dma_mask > DMA_BIT_MASK(32)) {
15241 err = pci_set_dma_mask(pdev, dma_mask);
15242 if (!err) {
15243 dev->features |= NETIF_F_HIGHDMA;
15244 err = pci_set_consistent_dma_mask(pdev,
15245 persist_dma_mask);
15246 if (err < 0) {
15247 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15248 "DMA for consistent allocations\n");
15249 goto err_out_iounmap;
15253 if (err || dma_mask == DMA_BIT_MASK(32)) {
15254 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15255 if (err) {
15256 dev_err(&pdev->dev,
15257 "No usable DMA configuration, aborting\n");
15258 goto err_out_iounmap;
15262 tg3_init_bufmgr_config(tp);
15264 /* Selectively allow TSO based on operating conditions */
15265 if ((tg3_flag(tp, HW_TSO_1) ||
15266 tg3_flag(tp, HW_TSO_2) ||
15267 tg3_flag(tp, HW_TSO_3)) ||
15268 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
15269 tg3_flag_set(tp, TSO_CAPABLE);
15270 else {
15271 tg3_flag_clear(tp, TSO_CAPABLE);
15272 tg3_flag_clear(tp, TSO_BUG);
15273 tp->fw_needed = NULL;
15276 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15277 tp->fw_needed = FIRMWARE_TG3;
15279 /* TSO is on by default on chips that support hardware TSO.
15280 * Firmware TSO on older chips gives lower performance, so it
15281 * is off by default, but can be enabled using ethtool.
15282 */
15283 if ((tg3_flag(tp, HW_TSO_1) ||
15284 tg3_flag(tp, HW_TSO_2) ||
15285 tg3_flag(tp, HW_TSO_3)) &&
15286 (dev->features & NETIF_F_IP_CSUM))
15287 hw_features |= NETIF_F_TSO;
15288 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15289 if (dev->features & NETIF_F_IPV6_CSUM)
15290 hw_features |= NETIF_F_TSO6;
15291 if (tg3_flag(tp, HW_TSO_3) ||
15292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15293 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15294 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15295 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15296 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15297 hw_features |= NETIF_F_TSO_ECN;
15300 dev->hw_features |= hw_features;
15301 dev->features |= hw_features;
15302 dev->vlan_features |= hw_features;
15304 /*
15305 * Add loopback capability only for a subset of devices that support
15306 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15307 * loopback for the remaining devices.
15308 */
15309 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15310 !tg3_flag(tp, CPMU_PRESENT))
15311 /* Add the loopback capability */
15312 dev->hw_features |= NETIF_F_LOOPBACK;
15314 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15315 !tg3_flag(tp, TSO_CAPABLE) &&
15316 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15317 tg3_flag_set(tp, MAX_RXPEND_64);
15318 tp->rx_pending = 63;
15321 err = tg3_get_device_address(tp);
15322 if (err) {
15323 dev_err(&pdev->dev,
15324 "Could not obtain valid ethernet address, aborting\n");
15325 goto err_out_iounmap;
15328 if (tg3_flag(tp, ENABLE_APE)) {
15329 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15330 if (!tp->aperegs) {
15331 dev_err(&pdev->dev,
15332 "Cannot map APE registers, aborting\n");
15333 err = -ENOMEM;
15334 goto err_out_iounmap;
15337 tg3_ape_lock_init(tp);
15339 if (tg3_flag(tp, ENABLE_ASF))
15340 tg3_read_dash_ver(tp);
15341 }
15343 /*
15344 * Reset the chip in case a UNDI or EFI driver did not shut it down.
15345 * The DMA self test will enable WDMAC and we'd see (spurious)
15346 * pending DMA on the PCI bus at that point.
15347 */
15348 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15349 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15350 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15351 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15354 err = tg3_test_dma(tp);
15355 if (err) {
15356 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15357 goto err_out_apeunmap;
15360 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15361 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15362 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15363 for (i = 0; i < tp->irq_max; i++) {
15364 struct tg3_napi *tnapi = &tp->napi[i];
15366 tnapi->tp = tp;
15367 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15369 tnapi->int_mbox = intmbx;
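/* The first four interrupt mailboxes appear to be full 8-byte
 * registers; later vectors are packed at 4-byte spacing.
 */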
15370 if (i < 4)
15371 intmbx += 0x8;
15372 else
15373 intmbx += 0x4;
15375 tnapi->consmbox = rcvmbx;
15376 tnapi->prodmbox = sndmbx;
15378 if (i)
15379 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15380 else
15381 tnapi->coal_now = HOSTCC_MODE_NOW;
15383 if (!tg3_flag(tp, SUPPORT_MSIX))
15384 break;
15386 /*
15387 * If we support MSIX, we'll be using RSS. If we're using
15388 * RSS, the first vector only handles link interrupts and the
15389 * remaining vectors handle rx and tx interrupts. Reuse the
15390 * mailbox values for the next iteration. The values we set up
15391 * above are still useful for the single vectored mode.
15392 */
15393 if (!i)
15394 continue;
15396 rcvmbx += 0x8;
15398 if (sndmbx & 0x4)
15399 sndmbx -= 0x4;
15400 else
15401 sndmbx += 0xc;
15404 tg3_init_coal(tp);
15406 pci_set_drvdata(pdev, dev);
15408 err = register_netdev(dev);
15409 if (err) {
15410 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15411 goto err_out_apeunmap;
15414 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15415 tp->board_part_number,
15416 tp->pci_chip_rev_id,
15417 tg3_bus_string(tp, str),
15418 dev->dev_addr);
15420 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15421 struct phy_device *phydev;
15422 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15423 netdev_info(dev,
15424 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15425 phydev->drv->name, dev_name(&phydev->dev));
15426 } else {
15427 char *ethtype;
15429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15430 ethtype = "10/100Base-TX";
15431 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15432 ethtype = "1000Base-SX";
15433 else
15434 ethtype = "10/100/1000Base-T";
15436 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15437 "(WireSpeed[%d], EEE[%d])\n",
15438 tg3_phy_string(tp), ethtype,
15439 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15440 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15443 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15444 (dev->features & NETIF_F_RXCSUM) != 0,
15445 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15446 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15447 tg3_flag(tp, ENABLE_ASF) != 0,
15448 tg3_flag(tp, TSO_CAPABLE) != 0);
15449 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15450 tp->dma_rwctrl,
15451 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15452 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15454 pci_save_state(pdev);
15456 return 0;
15458 err_out_apeunmap:
15459 if (tp->aperegs) {
15460 iounmap(tp->aperegs);
15461 tp->aperegs = NULL;
15464 err_out_iounmap:
15465 if (tp->regs) {
15466 iounmap(tp->regs);
15467 tp->regs = NULL;
15470 err_out_free_dev:
15471 free_netdev(dev);
15473 err_out_free_res:
15474 pci_release_regions(pdev);
15476 err_out_disable_pdev:
15477 pci_disable_device(pdev);
15478 pci_set_drvdata(pdev, NULL);
15479 return err;
15482 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15484 struct net_device *dev = pci_get_drvdata(pdev);
15486 if (dev) {
15487 struct tg3 *tp = netdev_priv(dev);
15489 if (tp->fw)
15490 release_firmware(tp->fw);
15492 cancel_work_sync(&tp->reset_task);
15494 if (!tg3_flag(tp, USE_PHYLIB)) {
15495 tg3_phy_fini(tp);
15496 tg3_mdio_fini(tp);
15499 unregister_netdev(dev);
15500 if (tp->aperegs) {
15501 iounmap(tp->aperegs);
15502 tp->aperegs = NULL;
15504 if (tp->regs) {
15505 iounmap(tp->regs);
15506 tp->regs = NULL;
15508 free_netdev(dev);
15509 pci_release_regions(pdev);
15510 pci_disable_device(pdev);
15511 pci_set_drvdata(pdev, NULL);
15515 #ifdef CONFIG_PM_SLEEP
15516 static int tg3_suspend(struct device *device)
15518 struct pci_dev *pdev = to_pci_dev(device);
15519 struct net_device *dev = pci_get_drvdata(pdev);
15520 struct tg3 *tp = netdev_priv(dev);
15521 int err;
15523 if (!netif_running(dev))
15524 return 0;
15526 flush_work_sync(&tp->reset_task);
15527 tg3_phy_stop(tp);
15528 tg3_netif_stop(tp);
15530 del_timer_sync(&tp->timer);
15532 tg3_full_lock(tp, 1);
15533 tg3_disable_ints(tp);
15534 tg3_full_unlock(tp);
15536 netif_device_detach(dev);
15538 tg3_full_lock(tp, 0);
15539 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15540 tg3_flag_clear(tp, INIT_COMPLETE);
15541 tg3_full_unlock(tp);
15543 err = tg3_power_down_prepare(tp);
15544 if (err) {
15545 int err2;
15547 tg3_full_lock(tp, 0);
15549 tg3_flag_set(tp, INIT_COMPLETE);
15550 err2 = tg3_restart_hw(tp, 1);
15551 if (err2)
15552 goto out;
15554 tp->timer.expires = jiffies + tp->timer_offset;
15555 add_timer(&tp->timer);
15557 netif_device_attach(dev);
15558 tg3_netif_start(tp);
15560 out:
15561 tg3_full_unlock(tp);
15563 if (!err2)
15564 tg3_phy_start(tp);
15567 return err;
15570 static int tg3_resume(struct device *device)
15572 struct pci_dev *pdev = to_pci_dev(device);
15573 struct net_device *dev = pci_get_drvdata(pdev);
15574 struct tg3 *tp = netdev_priv(dev);
15575 int err;
15577 if (!netif_running(dev))
15578 return 0;
15580 netif_device_attach(dev);
15582 tg3_full_lock(tp, 0);
15584 tg3_flag_set(tp, INIT_COMPLETE);
15585 err = tg3_restart_hw(tp, 1);
15586 if (err)
15587 goto out;
15589 tp->timer.expires = jiffies + tp->timer_offset;
15590 add_timer(&tp->timer);
15592 tg3_netif_start(tp);
15594 out:
15595 tg3_full_unlock(tp);
15597 if (!err)
15598 tg3_phy_start(tp);
15600 return err;
15603 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15604 #define TG3_PM_OPS (&tg3_pm_ops)
15606 #else
15608 #define TG3_PM_OPS NULL
15610 #endif /* CONFIG_PM_SLEEP */
15612 /**
15613 * tg3_io_error_detected - called when PCI error is detected
15614 * @pdev: Pointer to PCI device
15615 * @state: The current pci connection state
15616 *
15617 * This function is called after a PCI bus error affecting
15618 * this device has been detected.
15619 */
15620 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15621 pci_channel_state_t state)
15623 struct net_device *netdev = pci_get_drvdata(pdev);
15624 struct tg3 *tp = netdev_priv(netdev);
15625 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15627 netdev_info(netdev, "PCI I/O error detected\n");
15629 rtnl_lock();
15631 if (!netif_running(netdev))
15632 goto done;
15634 tg3_phy_stop(tp);
15636 tg3_netif_stop(tp);
15638 del_timer_sync(&tp->timer);
15639 tg3_flag_clear(tp, RESTART_TIMER);
15641 /* Want to make sure that the reset task doesn't run */
15642 cancel_work_sync(&tp->reset_task);
15643 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
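/* RESTART_TIMER is cleared a second time in case the reset task
 * re-set it before cancel_work_sync() completed.
 */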
15644 tg3_flag_clear(tp, RESTART_TIMER);
15646 netif_device_detach(netdev);
15648 /* Clean up software state, even if MMIO is blocked */
15649 tg3_full_lock(tp, 0);
15650 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15651 tg3_full_unlock(tp);
15653 done:
15654 if (state == pci_channel_io_perm_failure)
15655 err = PCI_ERS_RESULT_DISCONNECT;
15656 else
15657 pci_disable_device(pdev);
15659 rtnl_unlock();
15661 return err;
15662 }
15664 /**
15665 * tg3_io_slot_reset - called after the pci bus has been reset.
15666 * @pdev: Pointer to PCI device
15667 *
15668 * Restart the card from scratch, as if from a cold-boot.
15669 * At this point, the card has experienced a hard reset,
15670 * followed by fixups by BIOS, and has its config space
15671 * set up identically to what it was at cold boot.
15672 */
15673 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15675 struct net_device *netdev = pci_get_drvdata(pdev);
15676 struct tg3 *tp = netdev_priv(netdev);
15677 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15678 int err;
15680 rtnl_lock();
15682 if (pci_enable_device(pdev)) {
15683 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15684 goto done;
15687 pci_set_master(pdev);
15688 pci_restore_state(pdev);
15689 pci_save_state(pdev);
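/* Re-save the just-restored config space so any later restore
 * starts from this known-good state.
 */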
15691 if (!netif_running(netdev)) {
15692 rc = PCI_ERS_RESULT_RECOVERED;
15693 goto done;
15696 err = tg3_power_up(tp);
15697 if (err) {
15698 netdev_err(netdev, "Failed to restore register access.\n");
15699 goto done;
15702 rc = PCI_ERS_RESULT_RECOVERED;
15704 done:
15705 rtnl_unlock();
15707 return rc;
15708 }
15710 /**
15711 * tg3_io_resume - called when traffic can start flowing again.
15712 * @pdev: Pointer to PCI device
15713 *
15714 * This callback is called when the error recovery driver tells
15715 * us that it's OK to resume normal operation.
15716 */
15717 static void tg3_io_resume(struct pci_dev *pdev)
15719 struct net_device *netdev = pci_get_drvdata(pdev);
15720 struct tg3 *tp = netdev_priv(netdev);
15721 int err;
15723 rtnl_lock();
15725 if (!netif_running(netdev))
15726 goto done;
15728 tg3_full_lock(tp, 0);
15729 tg3_flag_set(tp, INIT_COMPLETE);
15730 err = tg3_restart_hw(tp, 1);
15731 tg3_full_unlock(tp);
15732 if (err) {
15733 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15734 goto done;
15737 netif_device_attach(netdev);
15739 tp->timer.expires = jiffies + tp->timer_offset;
15740 add_timer(&tp->timer);
15742 tg3_netif_start(tp);
15744 tg3_phy_start(tp);
15746 done:
15747 rtnl_unlock();
15750 static struct pci_error_handlers tg3_err_handler = {
15751 .error_detected = tg3_io_error_detected,
15752 .slot_reset = tg3_io_slot_reset,
15753 .resume = tg3_io_resume
15756 static struct pci_driver tg3_driver = {
15757 .name = DRV_MODULE_NAME,
15758 .id_table = tg3_pci_tbl,
15759 .probe = tg3_init_one,
15760 .remove = __devexit_p(tg3_remove_one),
15761 .err_handler = &tg3_err_handler,
15762 .driver.pm = TG3_PM_OPS,
15765 static int __init tg3_init(void)
15767 return pci_register_driver(&tg3_driver);
15770 static void __exit tg3_cleanup(void)
15772 pci_unregister_driver(&tg3_driver);
15775 module_init(tg3_init);
15776 module_exit(tg3_cleanup);