/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

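/* Driver feature and state flags live in the tp->tg3_flags bitmap, so the
 * wrappers above inherit the atomicity of test_bit()/set_bit()/clear_bit().
 * The token-pasting macros give call sites a compact spelling, e.g.
 * tg3_flag(tp, ENABLE_ASF) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_ASF, (tp)->tg3_flags).
 */
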
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

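/* Because TG3_TX_RING_SIZE is a power of two, NEXT_TX() can wrap the ring
 * index with a mask instead of a modulo, e.g. NEXT_TX(510) == 511 and
 * NEXT_TX(511) == 0.
 */
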
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

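/* When NET_IP_ALIGN is non-zero the start of the RX buffer is shifted so
 * that the IP header lands on a 4-byte boundary; tp->rx_offset carries the
 * per-device offset (presumably NET_SKB_PAD plus the alignment slack).
 * Otherwise the plain NET_SKB_PAD headroom is enough.
 */
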
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

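/* Some chips cannot be trusted with memory-mapped register access (see the
 * PCIX_TARGET_HWBUG and ICH_WORKAROUND flags), so the helpers below bounce
 * register accesses through the PCI config-space window instead: the target
 * offset is written to TG3PCI_REG_BASE_ADDR and the data moves through
 * TG3PCI_REG_DATA, all serialized by tp->indirect_lock.
 */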
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

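/* Mailbox writes normally complete as posted PCI writes; the read-back in
 * tw32_mailbox_flush() pushes the write all the way to the device before
 * returning.  The read-back is skipped on MBOX_WRITE_REORDER/ICH_WORKAROUND
 * configurations (unless FLUSH_POSTED_WRITES is set), where write32_mbox is
 * expected to already use a non-posted path such as the indirect window.
 */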
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

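/* All register and mailbox accesses below go through the tp->read32 and
 * tp->write32 function pointers, so a single code path works whether the
 * device uses direct MMIO, write-flushed MMIO, or the indirect config-space
 * window chosen for the chip at probe time.
 */
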
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

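/* The APE (Application Processing Engine) is the management co-processor on
 * APE-enabled chips.  Driver and APE firmware arbitrate shared resources
 * through per-resource request/grant registers; the helpers below implement
 * that locking protocol.
 */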
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

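/* A typical caller brackets access to the shared resource with the pair of
 * helpers, roughly:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... touch the APE-shared resource ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * (tg3_ape_event_lock() below follows exactly this pattern.)
 */
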
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		usleep_range(10, 20);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Return if the heartbeat interval has not yet elapsed. */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

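/* With TAGGED_STATUS, writing last_tag back to the interrupt mailbox tells
 * the chip exactly how much status-block work the driver has consumed, so
 * the chip itself can decide whether another interrupt is warranted.
 * Without it, tg3_has_work() below re-checks the status block for work that
 * may have arrived while interrupts were off.
 */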
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

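/* __tg3_readphy()/__tg3_writephy() drive an MII management frame through the
 * MAC_MI_COM register: the PHY address, register address, and command bits
 * are packed into one word, then MI_COM_BUSY is polled (up to PHY_BUSY_LOOPS
 * iterations of 10 usec each) until the serial transaction completes.
 */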
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

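/* The two callbacks below export the driver's private PHY accessors as a
 * standard mii_bus, which lets the kernel's phylib drivers (used on the
 * 5785-class devices, see the USE_PHYLIB flag) manage the PHY.
 */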
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

1624 static inline void tg3_generate_fw_event(struct tg3 *tp)
1626 u32 val;
1628 val = tr32(GRC_RX_CPU_EVENT);
1629 val |= GRC_RX_CPU_DRIVER_EVENT;
1630 tw32_f(GRC_RX_CPU_EVENT, val);
1632 tp->last_event_jiffies = jiffies;
1635 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1637 /* tp->lock is held. */
1638 static void tg3_wait_for_event_ack(struct tg3 *tp)
1640 int i;
1641 unsigned int delay_cnt;
1642 long time_remain;
1644 /* If enough time has passed, no wait is necessary. */
1645 time_remain = (long)(tp->last_event_jiffies + 1 +
1646 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1647 (long)jiffies;
1648 if (time_remain < 0)
1649 return;
1651 /* Check if we can shorten the wait time. */
1652 delay_cnt = jiffies_to_usecs(time_remain);
1653 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1654 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1655 delay_cnt = (delay_cnt >> 3) + 1;
1657 for (i = 0; i < delay_cnt; i++) {
1658 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1659 break;
1660 if (pci_channel_offline(tp->pdev))
1661 break;
1663 udelay(8);
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

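/* Pause resolution for 1000BASE-X follows the usual autoneg rules: symmetric
 * pause advertised by both link partners enables flow control in both
 * directions, while the asymmetric-pause bit selects a single direction.
 * This appears to mirror the IEEE 802.3 Annex 28B resolution table for the
 * 1000XPAUSE encodings.
 */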
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

2009 static void tg3_adjust_link(struct net_device *dev)
2011 u8 oldflowctrl, linkmesg = 0;
2012 u32 mac_mode, lcl_adv, rmt_adv;
2013 struct tg3 *tp = netdev_priv(dev);
2014 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2016 spin_lock_bh(&tp->lock);
2018 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2019 MAC_MODE_HALF_DUPLEX);
2021 oldflowctrl = tp->link_config.active_flowctrl;
2023 if (phydev->link) {
2024 lcl_adv = 0;
2025 rmt_adv = 0;
2027 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2028 mac_mode |= MAC_MODE_PORT_MODE_MII;
2029 else if (phydev->speed == SPEED_1000 ||
2030 tg3_asic_rev(tp) != ASIC_REV_5785)
2031 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2032 else
2033 mac_mode |= MAC_MODE_PORT_MODE_MII;
2035 if (phydev->duplex == DUPLEX_HALF)
2036 mac_mode |= MAC_MODE_HALF_DUPLEX;
2037 else {
2038 lcl_adv = mii_advertise_flowctrl(
2039 tp->link_config.flowctrl);
2041 if (phydev->pause)
2042 rmt_adv = LPA_PAUSE_CAP;
2043 if (phydev->asym_pause)
2044 rmt_adv |= LPA_PAUSE_ASYM;
2047 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2048 } else
2049 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2051 if (mac_mode != tp->mac_mode) {
2052 tp->mac_mode = mac_mode;
2053 tw32_f(MAC_MODE, tp->mac_mode);
2054 udelay(40);
2057 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2058 if (phydev->speed == SPEED_10)
2059 tw32(MAC_MI_STAT,
2060 MAC_MI_STAT_10MBPS_MODE |
2061 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2062 else
2063 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2066 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2067 tw32(MAC_TX_LENGTHS,
2068 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2069 (6 << TX_LENGTHS_IPG_SHIFT) |
2070 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2071 else
2072 tw32(MAC_TX_LENGTHS,
2073 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2074 (6 << TX_LENGTHS_IPG_SHIFT) |
2075 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2077 if (phydev->link != tp->old_link ||
2078 phydev->speed != tp->link_config.active_speed ||
2079 phydev->duplex != tp->link_config.active_duplex ||
2080 oldflowctrl != tp->link_config.active_flowctrl)
2081 linkmesg = 1;
2083 tp->old_link = phydev->link;
2084 tp->link_config.active_speed = phydev->speed;
2085 tp->link_config.active_duplex = phydev->duplex;
2087 spin_unlock_bh(&tp->lock);
2089 if (linkmesg)
2090 tg3_link_report(tp);
2093 static int tg3_phy_init(struct tg3 *tp)
2095 struct phy_device *phydev;
2097 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2098 return 0;
2100 /* Bring the PHY back to a known state. */
2101 tg3_bmcr_reset(tp);
2103 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2105 /* Attach the MAC to the PHY. */
2106 phydev = phy_connect(tp->dev, phydev_name(phydev),
2107 tg3_adjust_link, phydev->interface);
2108 if (IS_ERR(phydev)) {
2109 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2110 return PTR_ERR(phydev);
2113 /* Mask with MAC supported features. */
2114 switch (phydev->interface) {
2115 case PHY_INTERFACE_MODE_GMII:
2116 case PHY_INTERFACE_MODE_RGMII:
2117 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2118 phydev->supported &= (PHY_GBIT_FEATURES |
2119 SUPPORTED_Pause |
2120 SUPPORTED_Asym_Pause);
2121 break;
2123 /* fallthru */
2124 case PHY_INTERFACE_MODE_MII:
2125 phydev->supported &= (PHY_BASIC_FEATURES |
2126 SUPPORTED_Pause |
2127 SUPPORTED_Asym_Pause);
2128 break;
2129 default:
2130 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2131 return -EINVAL;
2134 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2136 phydev->advertising = phydev->supported;
2138 phy_attached_info(phydev);
2140 return 0;
2143 static void tg3_phy_start(struct tg3 *tp)
2145 struct phy_device *phydev;
2147 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2148 return;
2150 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2152 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2153 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2154 phydev->speed = tp->link_config.speed;
2155 phydev->duplex = tp->link_config.duplex;
2156 phydev->autoneg = tp->link_config.autoneg;
2157 phydev->advertising = tp->link_config.advertising;
2160 phy_start(phydev);
2162 phy_start_aneg(phydev);
2165 static void tg3_phy_stop(struct tg3 *tp)
2167 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2168 return;
2170 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2173 static void tg3_phy_fini(struct tg3 *tp)
2175 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2176 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2177 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2181 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2183 int err;
2184 u32 val;
2186 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2187 return 0;
2189 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2190 /* Cannot do read-modify-write on 5401 */
2191 err = tg3_phy_auxctl_write(tp,
2192 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2193 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2194 0x4c20);
2195 goto done;
2198 err = tg3_phy_auxctl_read(tp,
2199 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2200 if (err)
2201 return err;
2203 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2204 err = tg3_phy_auxctl_write(tp,
2205 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2207 done:
2208 return err;
2211 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2213 u32 phytest;
2215 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2216 u32 phy;
2218 tg3_writephy(tp, MII_TG3_FET_TEST,
2219 phytest | MII_TG3_FET_SHADOW_EN);
2220 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2221 if (enable)
2222 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2223 else
2224 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2225 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2227 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2231 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2233 u32 reg;
2235 if (!tg3_flag(tp, 5705_PLUS) ||
2236 (tg3_flag(tp, 5717_PLUS) &&
2237 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2238 return;
2240 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2241 tg3_phy_fet_toggle_apd(tp, enable);
2242 return;
2245 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2246 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2247 MII_TG3_MISC_SHDW_SCR5_SDTL |
2248 MII_TG3_MISC_SHDW_SCR5_C125OE;
2249 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2250 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2252 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2255 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2256 if (enable)
2257 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2259 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2262 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2264 u32 phy;
2266 if (!tg3_flag(tp, 5705_PLUS) ||
2267 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2268 return;
2270 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2271 u32 ephy;
2273 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2274 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2276 tg3_writephy(tp, MII_TG3_FET_TEST,
2277 ephy | MII_TG3_FET_SHADOW_EN);
2278 if (!tg3_readphy(tp, reg, &phy)) {
2279 if (enable)
2280 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2281 else
2282 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2283 tg3_writephy(tp, reg, phy);
2285 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2287 } else {
2288 int ret;
2290 ret = tg3_phy_auxctl_read(tp,
2291 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2292 if (!ret) {
2293 if (enable)
2294 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2295 else
2296 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2297 tg3_phy_auxctl_write(tp,
2298 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2303 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2305 int ret;
2306 u32 val;
2308 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2309 return;
2311 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2312 if (!ret)
2313 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2314 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2317 static void tg3_phy_apply_otp(struct tg3 *tp)
2319 u32 otp, phy;
2321 if (!tp->phy_otp)
2322 return;
2324 otp = tp->phy_otp;
2326 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2327 return;
2329 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2330 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2331 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2333 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2334 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2335 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2337 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2338 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2339 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2341 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2342 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2344 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2345 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2347 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2348 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2349 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2351 tg3_phy_toggle_auxctl_smdsp(tp, false);
2354 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2356 u32 val;
2357 struct ethtool_eee *dest = &tp->eee;
2359 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2360 return;
2362 if (eee)
2363 dest = eee;
2365 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2366 return;
2368 /* Pull eee_active */
2369 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2370 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2371 dest->eee_active = 1;
2372 } else
2373 dest->eee_active = 0;
2375 /* Pull lp advertised settings */
2376 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2377 return;
2378 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2380 /* Pull advertised and eee_enabled settings */
2381 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2382 return;
2383 dest->eee_enabled = !!val;
2384 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2386 /* Pull tx_lpi_enabled */
2387 val = tr32(TG3_CPMU_EEE_MODE);
2388 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2390 /* Pull lpi timer value */
2391 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2394 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2396 u32 val;
2398 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2399 return;
2401 tp->setlpicnt = 0;
2403 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2404 current_link_up &&
2405 tp->link_config.active_duplex == DUPLEX_FULL &&
2406 (tp->link_config.active_speed == SPEED_100 ||
2407 tp->link_config.active_speed == SPEED_1000)) {
2408 u32 eeectl;
2410 if (tp->link_config.active_speed == SPEED_1000)
2411 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2412 else
2413 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2415 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2417 tg3_eee_pull_config(tp, NULL);
2418 if (tp->eee.eee_active)
2419 tp->setlpicnt = 2;
2422 if (!tp->setlpicnt) {
2423 if (current_link_up &&
2424 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2425 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2426 tg3_phy_toggle_auxctl_smdsp(tp, false);
2429 val = tr32(TG3_CPMU_EEE_MODE);
2430 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2434 static void tg3_phy_eee_enable(struct tg3 *tp)
2436 u32 val;
2438 if (tp->link_config.active_speed == SPEED_1000 &&
2439 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2440 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2441 tg3_flag(tp, 57765_CLASS)) &&
2442 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2443 val = MII_TG3_DSP_TAP26_ALNOKO |
2444 MII_TG3_DSP_TAP26_RMRXSTO;
2445 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2446 tg3_phy_toggle_auxctl_smdsp(tp, false);
2449 val = tr32(TG3_CPMU_EEE_MODE);
2450 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2453 static int tg3_wait_macro_done(struct tg3 *tp)
2455 int limit = 100;
2457 while (limit--) {
2458 u32 tmp32;
2460 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2461 if ((tmp32 & 0x1000) == 0)
2462 break;
2465 if (limit < 0)
2466 return -EBUSY;
2468 return 0;
2471 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2473 static const u32 test_pat[4][6] = {
2474 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2475 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2476 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2477 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2479 int chan;
2481 for (chan = 0; chan < 4; chan++) {
2482 int i;
2484 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2485 (chan * 0x2000) | 0x0200);
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2488 for (i = 0; i < 6; i++)
2489 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2490 test_pat[chan][i]);
2492 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2493 if (tg3_wait_macro_done(tp)) {
2494 *resetp = 1;
2495 return -EBUSY;
2498 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2499 (chan * 0x2000) | 0x0200);
2500 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2501 if (tg3_wait_macro_done(tp)) {
2502 *resetp = 1;
2503 return -EBUSY;
2506 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2507 if (tg3_wait_macro_done(tp)) {
2508 *resetp = 1;
2509 return -EBUSY;
2512 for (i = 0; i < 6; i += 2) {
2513 u32 low, high;
2515 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2516 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2517 tg3_wait_macro_done(tp)) {
2518 *resetp = 1;
2519 return -EBUSY;
2521 low &= 0x7fff;
2522 high &= 0x000f;
2523 if (low != test_pat[chan][i] ||
2524 high != test_pat[chan][i+1]) {
2525 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2526 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2527 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2529 return -EBUSY;
2534 return 0;
2537 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2539 int chan;
2541 for (chan = 0; chan < 4; chan++) {
2542 int i;
2544 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2545 (chan * 0x2000) | 0x0200);
2546 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2547 for (i = 0; i < 6; i++)
2548 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2549 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2550 if (tg3_wait_macro_done(tp))
2551 return -EBUSY;
2554 return 0;
2557 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2559 u32 reg32, phy9_orig;
2560 int retries, do_phy_reset, err;
2562 retries = 10;
2563 do_phy_reset = 1;
2564 do {
2565 if (do_phy_reset) {
2566 err = tg3_bmcr_reset(tp);
2567 if (err)
2568 return err;
2569 do_phy_reset = 0;
2572 /* Disable transmitter and interrupt. */
2573 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2574 continue;
2576 reg32 |= 0x3000;
2577 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2579 /* Set full-duplex, 1000 Mbps. */
2580 tg3_writephy(tp, MII_BMCR,
2581 BMCR_FULLDPLX | BMCR_SPEED1000);
2583 /* Set to master mode. */
2584 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2585 continue;
2587 tg3_writephy(tp, MII_CTRL1000,
2588 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2590 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2591 if (err)
2592 return err;
2594 /* Block the PHY control access. */
2595 tg3_phydsp_write(tp, 0x8005, 0x0800);
2597 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2598 if (!err)
2599 break;
2600 } while (--retries);
2602 err = tg3_phy_reset_chanpat(tp);
2603 if (err)
2604 return err;
2606 tg3_phydsp_write(tp, 0x8005, 0x0000);
2608 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2609 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2611 tg3_phy_toggle_auxctl_smdsp(tp, false);
2613 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2615 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2616 if (err)
2617 return err;
2619 reg32 &= ~0x3000;
2620 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2622 return 0;
2625 static void tg3_carrier_off(struct tg3 *tp)
2627 netif_carrier_off(tp->dev);
2628 tp->link_up = false;
2631 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2633 if (tg3_flag(tp, ENABLE_ASF))
2634 netdev_warn(tp->dev,
2635 "Management side-band traffic will be interrupted during phy settings change\n");
2638 /* This will reset the tigon3 PHY if there is no valid
2639 * link unless the FORCE argument is non-zero.
2640 */
2641 static int tg3_phy_reset(struct tg3 *tp)
2643 u32 val, cpmuctrl;
2644 int err;
2646 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2647 val = tr32(GRC_MISC_CFG);
2648 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2649 udelay(40);
2651 err = tg3_readphy(tp, MII_BMSR, &val);
2652 err |= tg3_readphy(tp, MII_BMSR, &val);
2653 if (err != 0)
2654 return -EBUSY;
2656 if (netif_running(tp->dev) && tp->link_up) {
2657 netif_carrier_off(tp->dev);
2658 tg3_link_report(tp);
2661 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2662 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2663 tg3_asic_rev(tp) == ASIC_REV_5705) {
2664 err = tg3_phy_reset_5703_4_5(tp);
2665 if (err)
2666 return err;
2667 goto out;
2670 cpmuctrl = 0;
2671 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2672 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2673 cpmuctrl = tr32(TG3_CPMU_CTRL);
2674 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2675 tw32(TG3_CPMU_CTRL,
2676 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2679 err = tg3_bmcr_reset(tp);
2680 if (err)
2681 return err;
2683 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2684 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2685 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2687 tw32(TG3_CPMU_CTRL, cpmuctrl);
2690 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2691 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2692 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2693 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2694 CPMU_LSPD_1000MB_MACCLK_12_5) {
2695 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2696 udelay(40);
2697 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2701 if (tg3_flag(tp, 5717_PLUS) &&
2702 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2703 return 0;
2705 tg3_phy_apply_otp(tp);
2707 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2708 tg3_phy_toggle_apd(tp, true);
2709 else
2710 tg3_phy_toggle_apd(tp, false);
2712 out:
2713 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2714 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2716 tg3_phydsp_write(tp, 0x000a, 0x0323);
2717 tg3_phy_toggle_auxctl_smdsp(tp, false);
2720 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2721 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2722 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2725 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2726 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2727 tg3_phydsp_write(tp, 0x000a, 0x310b);
2728 tg3_phydsp_write(tp, 0x201f, 0x9506);
2729 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2730 tg3_phy_toggle_auxctl_smdsp(tp, false);
2732 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2733 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2734 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2735 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2736 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2737 tg3_writephy(tp, MII_TG3_TEST1,
2738 MII_TG3_TEST1_TRIM_EN | 0x4);
2739 } else
2740 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2742 tg3_phy_toggle_auxctl_smdsp(tp, false);
2746 /* Set Extended packet length bit (bit 14) on all chips that */
2747 /* support jumbo frames */
2748 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2749 /* Cannot do read-modify-write on 5401 */
2750 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2751 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752 /* Set bit 14 with read-modify-write to preserve other bits */
2753 err = tg3_phy_auxctl_read(tp,
2754 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2755 if (!err)
2756 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2757 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2760 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2761 * jumbo frames transmission.
2762 */
2763 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2764 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2765 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2766 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2769 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2770 /* adjust output voltage */
2771 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2774 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2775 tg3_phydsp_write(tp, 0xffb, 0x4000);
2777 tg3_phy_toggle_automdix(tp, true);
2778 tg3_phy_set_wirespeed(tp);
2779 return 0;
2782 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2783 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2784 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2785 TG3_GPIO_MSG_NEED_VAUX)
2786 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2787 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2788 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2789 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2790 (TG3_GPIO_MSG_DRVR_PRES << 12))
2792 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2793 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2794 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2795 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2796 (TG3_GPIO_MSG_NEED_VAUX << 12))
2798 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2800 u32 status, shift;
2802 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2803 tg3_asic_rev(tp) == ASIC_REV_5719)
2804 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2805 else
2806 status = tr32(TG3_CPMU_DRV_STATUS);
2808 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2809 status &= ~(TG3_GPIO_MSG_MASK << shift);
2810 status |= (newstat << shift);
2812 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2813 tg3_asic_rev(tp) == ASIC_REV_5719)
2814 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2815 else
2816 tw32(TG3_CPMU_DRV_STATUS, status);
2818 return status >> TG3_APE_GPIO_MSG_SHIFT;
2819 }
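/*
 * Illustration of the per-function packing above (hypothetical values):
 * each PCI function owns a 4-bit lane, so for tp->pci_fn == 2 the shift
 * is TG3_APE_GPIO_MSG_SHIFT + 8, and a newstat of TG3_GPIO_MSG_DRVR_PRES
 * lands at bit position 8 of the returned value; that is the same slot
 * the (TG3_GPIO_MSG_DRVR_PRES << 8) term covers in the
 * TG3_GPIO_MSG_ALL_DRVR_PRES_MASK definition.
 */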
2821 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2823 if (!tg3_flag(tp, IS_NIC))
2824 return 0;
2826 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2827 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2828 tg3_asic_rev(tp) == ASIC_REV_5720) {
2829 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2830 return -EIO;
2832 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2834 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2837 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2838 } else {
2839 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2840 TG3_GRC_LCLCTL_PWRSW_DELAY);
2843 return 0;
2846 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2848 u32 grc_local_ctrl;
2850 if (!tg3_flag(tp, IS_NIC) ||
2851 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2852 tg3_asic_rev(tp) == ASIC_REV_5701)
2853 return;
2855 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2857 tw32_wait_f(GRC_LOCAL_CTRL,
2858 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2859 TG3_GRC_LCLCTL_PWRSW_DELAY);
2861 tw32_wait_f(GRC_LOCAL_CTRL,
2862 grc_local_ctrl,
2863 TG3_GRC_LCLCTL_PWRSW_DELAY);
2865 tw32_wait_f(GRC_LOCAL_CTRL,
2866 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2867 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2872 if (!tg3_flag(tp, IS_NIC))
2873 return;
2875 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2876 tg3_asic_rev(tp) == ASIC_REV_5701) {
2877 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2878 (GRC_LCLCTRL_GPIO_OE0 |
2879 GRC_LCLCTRL_GPIO_OE1 |
2880 GRC_LCLCTRL_GPIO_OE2 |
2881 GRC_LCLCTRL_GPIO_OUTPUT0 |
2882 GRC_LCLCTRL_GPIO_OUTPUT1),
2883 TG3_GRC_LCLCTL_PWRSW_DELAY);
2884 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2885 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2886 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2887 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2888 GRC_LCLCTRL_GPIO_OE1 |
2889 GRC_LCLCTRL_GPIO_OE2 |
2890 GRC_LCLCTRL_GPIO_OUTPUT0 |
2891 GRC_LCLCTRL_GPIO_OUTPUT1 |
2892 tp->grc_local_ctrl;
2893 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2894 TG3_GRC_LCLCTL_PWRSW_DELAY);
2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2897 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2898 TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2901 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2902 TG3_GRC_LCLCTL_PWRSW_DELAY);
2903 } else {
2904 u32 no_gpio2;
2905 u32 grc_local_ctrl = 0;
2907 /* Workaround to prevent overdrawing Amps. */
2908 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2909 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2910 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2911 grc_local_ctrl,
2912 TG3_GRC_LCLCTL_PWRSW_DELAY);
2915 /* On 5753 and variants, GPIO2 cannot be used. */
2916 no_gpio2 = tp->nic_sram_data_cfg &
2917 NIC_SRAM_DATA_CFG_NO_GPIO2;
2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2920 GRC_LCLCTRL_GPIO_OE1 |
2921 GRC_LCLCTRL_GPIO_OE2 |
2922 GRC_LCLCTRL_GPIO_OUTPUT1 |
2923 GRC_LCLCTRL_GPIO_OUTPUT2;
2924 if (no_gpio2) {
2925 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2926 GRC_LCLCTRL_GPIO_OUTPUT2);
2928 tw32_wait_f(GRC_LOCAL_CTRL,
2929 tp->grc_local_ctrl | grc_local_ctrl,
2930 TG3_GRC_LCLCTL_PWRSW_DELAY);
2932 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2934 tw32_wait_f(GRC_LOCAL_CTRL,
2935 tp->grc_local_ctrl | grc_local_ctrl,
2936 TG3_GRC_LCLCTL_PWRSW_DELAY);
2938 if (!no_gpio2) {
2939 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2940 tw32_wait_f(GRC_LOCAL_CTRL,
2941 tp->grc_local_ctrl | grc_local_ctrl,
2942 TG3_GRC_LCLCTL_PWRSW_DELAY);
2947 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2949 u32 msg = 0;
2951 /* Serialize power state transitions */
2952 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2953 return;
2955 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2956 msg = TG3_GPIO_MSG_NEED_VAUX;
2958 msg = tg3_set_function_status(tp, msg);
2960 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2961 goto done;
2963 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2964 tg3_pwrsrc_switch_to_vaux(tp);
2965 else
2966 tg3_pwrsrc_die_with_vmain(tp);
2968 done:
2969 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2972 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2974 bool need_vaux = false;
2976 /* The GPIOs do something completely different on 57765. */
2977 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2978 return;
2980 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2981 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2982 tg3_asic_rev(tp) == ASIC_REV_5720) {
2983 tg3_frob_aux_power_5717(tp, include_wol ?
2984 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2985 return;
2988 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2989 struct net_device *dev_peer;
2991 dev_peer = pci_get_drvdata(tp->pdev_peer);
2993 /* remove_one() may have been run on the peer. */
2994 if (dev_peer) {
2995 struct tg3 *tp_peer = netdev_priv(dev_peer);
2997 if (tg3_flag(tp_peer, INIT_COMPLETE))
2998 return;
3000 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3001 tg3_flag(tp_peer, ENABLE_ASF))
3002 need_vaux = true;
3006 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3007 tg3_flag(tp, ENABLE_ASF))
3008 need_vaux = true;
3010 if (need_vaux)
3011 tg3_pwrsrc_switch_to_vaux(tp);
3012 else
3013 tg3_pwrsrc_die_with_vmain(tp);
3016 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3018 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3019 return 1;
3020 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3021 if (speed != SPEED_10)
3022 return 1;
3023 } else if (speed == SPEED_10)
3024 return 1;
3026 return 0;
3029 static bool tg3_phy_power_bug(struct tg3 *tp)
3031 switch (tg3_asic_rev(tp)) {
3032 case ASIC_REV_5700:
3033 case ASIC_REV_5704:
3034 return true;
3035 case ASIC_REV_5780:
3036 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3037 return true;
3038 return false;
3039 case ASIC_REV_5717:
3040 if (!tp->pci_fn)
3041 return true;
3042 return false;
3043 case ASIC_REV_5719:
3044 case ASIC_REV_5720:
3045 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3046 !tp->pci_fn)
3047 return true;
3048 return false;
3051 return false;
3054 static bool tg3_phy_led_bug(struct tg3 *tp)
3056 switch (tg3_asic_rev(tp)) {
3057 case ASIC_REV_5719:
3058 case ASIC_REV_5720:
3059 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3060 !tp->pci_fn)
3061 return true;
3062 return false;
3065 return false;
3068 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3070 u32 val;
3072 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3073 return;
3075 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3076 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3077 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3078 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3080 sg_dig_ctrl |=
3081 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3082 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3083 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3085 return;
3088 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3089 tg3_bmcr_reset(tp);
3090 val = tr32(GRC_MISC_CFG);
3091 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3092 udelay(40);
3093 return;
3094 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3095 u32 phytest;
3096 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3097 u32 phy;
3099 tg3_writephy(tp, MII_ADVERTISE, 0);
3100 tg3_writephy(tp, MII_BMCR,
3101 BMCR_ANENABLE | BMCR_ANRESTART);
3103 tg3_writephy(tp, MII_TG3_FET_TEST,
3104 phytest | MII_TG3_FET_SHADOW_EN);
3105 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3106 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3107 tg3_writephy(tp,
3108 MII_TG3_FET_SHDW_AUXMODE4,
3109 phy);
3111 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3113 return;
3114 } else if (do_low_power) {
3115 if (!tg3_phy_led_bug(tp))
3116 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3117 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3119 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3120 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3121 MII_TG3_AUXCTL_PCTL_VREG_11V;
3122 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3125 /* The PHY should not be powered down on some chips because
3126 * of bugs.
3127 */
3128 if (tg3_phy_power_bug(tp))
3129 return;
3131 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3132 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3133 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3134 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3135 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3136 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3139 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3142 /* tp->lock is held. */
3143 static int tg3_nvram_lock(struct tg3 *tp)
3145 if (tg3_flag(tp, NVRAM)) {
3146 int i;
3148 if (tp->nvram_lock_cnt == 0) {
3149 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3150 for (i = 0; i < 8000; i++) {
3151 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3152 break;
3153 udelay(20);
3155 if (i == 8000) {
3156 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3157 return -ENODEV;
3160 tp->nvram_lock_cnt++;
3162 return 0;
3165 /* tp->lock is held. */
3166 static void tg3_nvram_unlock(struct tg3 *tp)
3168 if (tg3_flag(tp, NVRAM)) {
3169 if (tp->nvram_lock_cnt > 0)
3170 tp->nvram_lock_cnt--;
3171 if (tp->nvram_lock_cnt == 0)
3172 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3176 /* tp->lock is held. */
3177 static void tg3_enable_nvram_access(struct tg3 *tp)
3179 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3180 u32 nvaccess = tr32(NVRAM_ACCESS);
3182 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3186 /* tp->lock is held. */
3187 static void tg3_disable_nvram_access(struct tg3 *tp)
3189 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3190 u32 nvaccess = tr32(NVRAM_ACCESS);
3192 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3196 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3197 u32 offset, u32 *val)
3199 u32 tmp;
3200 int i;
3202 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3203 return -EINVAL;
3205 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3206 EEPROM_ADDR_DEVID_MASK |
3207 EEPROM_ADDR_READ);
3208 tw32(GRC_EEPROM_ADDR,
3209 tmp |
3210 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3211 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3212 EEPROM_ADDR_ADDR_MASK) |
3213 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3215 for (i = 0; i < 1000; i++) {
3216 tmp = tr32(GRC_EEPROM_ADDR);
3218 if (tmp & EEPROM_ADDR_COMPLETE)
3219 break;
3220 msleep(1);
3222 if (!(tmp & EEPROM_ADDR_COMPLETE))
3223 return -EBUSY;
3225 tmp = tr32(GRC_EEPROM_DATA);
3227 /*
3228 * The data will always be opposite the native endian
3229 * format. Perform a blind byteswap to compensate.
3230 */
3231 *val = swab32(tmp);
3233 return 0;
3236 #define NVRAM_CMD_TIMEOUT 10000
3238 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3240 int i;
3242 tw32(NVRAM_CMD, nvram_cmd);
3243 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3244 usleep_range(10, 40);
3245 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3246 udelay(10);
3247 break;
3251 if (i == NVRAM_CMD_TIMEOUT)
3252 return -EBUSY;
3254 return 0;
3255 }
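/*
 * Rough bound (illustrative): with NVRAM_CMD_TIMEOUT == 10000 and a
 * 10-40 usec sleep per iteration, the wait above tops out at roughly
 * 0.1-0.4 seconds before -EBUSY is returned.
 */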
3257 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3259 if (tg3_flag(tp, NVRAM) &&
3260 tg3_flag(tp, NVRAM_BUFFERED) &&
3261 tg3_flag(tp, FLASH) &&
3262 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3263 (tp->nvram_jedecnum == JEDEC_ATMEL))
3265 addr = ((addr / tp->nvram_pagesize) <<
3266 ATMEL_AT45DB0X1B_PAGE_POS) +
3267 (addr % tp->nvram_pagesize);
3269 return addr;
3270 }
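/*
 * Worked example (hypothetical numbers, assuming the usual 264-byte
 * AT45DB0X1B page, i.e. tp->nvram_pagesize == 264 with a 9-bit in-page
 * field): a linear offset of 1000 translates to page 1000 / 264 == 3
 * and byte 1000 % 264 == 208, so the physical address becomes
 * (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208 == 0x6d0. The helper below
 * performs the inverse mapping.
 */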
3272 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3274 if (tg3_flag(tp, NVRAM) &&
3275 tg3_flag(tp, NVRAM_BUFFERED) &&
3276 tg3_flag(tp, FLASH) &&
3277 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3278 (tp->nvram_jedecnum == JEDEC_ATMEL))
3280 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3281 tp->nvram_pagesize) +
3282 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3284 return addr;
3287 /* NOTE: Data read in from NVRAM is byteswapped according to
3288 * the byteswapping settings for all other register accesses.
3289 * tg3 devices are BE devices, so on a BE machine, the data
3290 * returned will be exactly as it is seen in NVRAM. On a LE
3291 * machine, the 32-bit value will be byteswapped.
3292 */
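/*
 * Illustration (hypothetical word): bytes 0x00 0x01 0x02 0x03 stored in
 * NVRAM come back from tg3_nvram_read() as 0x00010203 on a big-endian
 * host but as a byteswapped 0x03020100 on a little-endian one;
 * tg3_nvram_read_be32() below exists so callers that want a plain
 * bytestream never see the difference.
 */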
3293 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3295 int ret;
3297 if (!tg3_flag(tp, NVRAM))
3298 return tg3_nvram_read_using_eeprom(tp, offset, val);
3300 offset = tg3_nvram_phys_addr(tp, offset);
3302 if (offset > NVRAM_ADDR_MSK)
3303 return -EINVAL;
3305 ret = tg3_nvram_lock(tp);
3306 if (ret)
3307 return ret;
3309 tg3_enable_nvram_access(tp);
3311 tw32(NVRAM_ADDR, offset);
3312 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3313 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3315 if (ret == 0)
3316 *val = tr32(NVRAM_RDDATA);
3318 tg3_disable_nvram_access(tp);
3320 tg3_nvram_unlock(tp);
3322 return ret;
3325 /* Ensures NVRAM data is in bytestream format. */
3326 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3328 u32 v;
3329 int res = tg3_nvram_read(tp, offset, &v);
3330 if (!res)
3331 *val = cpu_to_be32(v);
3332 return res;
3335 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3336 u32 offset, u32 len, u8 *buf)
3338 int i, j, rc = 0;
3339 u32 val;
3341 for (i = 0; i < len; i += 4) {
3342 u32 addr;
3343 __be32 data;
3345 addr = offset + i;
3347 memcpy(&data, buf + i, 4);
3349 /*
3350 * The SEEPROM interface expects the data to always be opposite
3351 * the native endian format. We accomplish this by reversing
3352 * all the operations that would have been performed on the
3353 * data from a call to tg3_nvram_read_be32().
3354 */
3355 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3357 val = tr32(GRC_EEPROM_ADDR);
3358 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3360 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3361 EEPROM_ADDR_READ);
3362 tw32(GRC_EEPROM_ADDR, val |
3363 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3364 (addr & EEPROM_ADDR_ADDR_MASK) |
3365 EEPROM_ADDR_START |
3366 EEPROM_ADDR_WRITE);
3368 for (j = 0; j < 1000; j++) {
3369 val = tr32(GRC_EEPROM_ADDR);
3371 if (val & EEPROM_ADDR_COMPLETE)
3372 break;
3373 msleep(1);
3375 if (!(val & EEPROM_ADDR_COMPLETE)) {
3376 rc = -EBUSY;
3377 break;
3381 return rc;
3384 /* offset and length are dword aligned */
3385 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3386 u8 *buf)
3388 int ret = 0;
3389 u32 pagesize = tp->nvram_pagesize;
3390 u32 pagemask = pagesize - 1;
3391 u32 nvram_cmd;
3392 u8 *tmp;
3394 tmp = kmalloc(pagesize, GFP_KERNEL);
3395 if (tmp == NULL)
3396 return -ENOMEM;
3398 while (len) {
3399 int j;
3400 u32 phy_addr, page_off, size;
3402 phy_addr = offset & ~pagemask;
3404 for (j = 0; j < pagesize; j += 4) {
3405 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3406 (__be32 *) (tmp + j));
3407 if (ret)
3408 break;
3410 if (ret)
3411 break;
3413 page_off = offset & pagemask;
3414 size = pagesize;
3415 if (len < size)
3416 size = len;
3418 len -= size;
3420 memcpy(tmp + page_off, buf, size);
3422 offset = offset + (pagesize - page_off);
3424 tg3_enable_nvram_access(tp);
3426 /*
3427 * Before we can erase the flash page, we need
3428 * to issue a special "write enable" command.
3429 */
3430 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3432 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3433 break;
3435 /* Erase the target page */
3436 tw32(NVRAM_ADDR, phy_addr);
3438 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3439 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3441 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3442 break;
3444 /* Issue another write enable to start the write. */
3445 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3447 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3448 break;
3450 for (j = 0; j < pagesize; j += 4) {
3451 __be32 data;
3453 data = *((__be32 *) (tmp + j));
3455 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3457 tw32(NVRAM_ADDR, phy_addr + j);
3459 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3460 NVRAM_CMD_WR;
3462 if (j == 0)
3463 nvram_cmd |= NVRAM_CMD_FIRST;
3464 else if (j == (pagesize - 4))
3465 nvram_cmd |= NVRAM_CMD_LAST;
3467 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3468 if (ret)
3469 break;
3471 if (ret)
3472 break;
3475 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3476 tg3_nvram_exec_cmd(tp, nvram_cmd);
3478 kfree(tmp);
3480 return ret;
3481 }
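/*
 * In short, each pass of the loop above services one flash page: a
 * read-back of the full page into tmp, a splice of the caller's bytes
 * at page_off, a write-enable plus page erase, then a word-at-a-time
 * program burst bracketed by NVRAM_CMD_FIRST and NVRAM_CMD_LAST. So
 * even a 4-byte update pays for a complete read-erase-program cycle of
 * its page.
 */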
3483 /* offset and length are dword aligned */
3484 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3485 u8 *buf)
3487 int i, ret = 0;
3489 for (i = 0; i < len; i += 4, offset += 4) {
3490 u32 page_off, phy_addr, nvram_cmd;
3491 __be32 data;
3493 memcpy(&data, buf + i, 4);
3494 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3496 page_off = offset % tp->nvram_pagesize;
3498 phy_addr = tg3_nvram_phys_addr(tp, offset);
3500 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3502 if (page_off == 0 || i == 0)
3503 nvram_cmd |= NVRAM_CMD_FIRST;
3504 if (page_off == (tp->nvram_pagesize - 4))
3505 nvram_cmd |= NVRAM_CMD_LAST;
3507 if (i == (len - 4))
3508 nvram_cmd |= NVRAM_CMD_LAST;
3510 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3511 !tg3_flag(tp, FLASH) ||
3512 !tg3_flag(tp, 57765_PLUS))
3513 tw32(NVRAM_ADDR, phy_addr);
3515 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3516 !tg3_flag(tp, 5755_PLUS) &&
3517 (tp->nvram_jedecnum == JEDEC_ST) &&
3518 (nvram_cmd & NVRAM_CMD_FIRST)) {
3519 u32 cmd;
3521 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3522 ret = tg3_nvram_exec_cmd(tp, cmd);
3523 if (ret)
3524 break;
3526 if (!tg3_flag(tp, FLASH)) {
3527 /* We always do complete word writes to eeprom. */
3528 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3531 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3532 if (ret)
3533 break;
3535 return ret;
3536 }
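/*
 * Example of the FIRST/LAST bracketing above (hypothetical 8-byte write
 * starting on a page boundary, with a page larger than 8 bytes): word 0
 * goes out with NVRAM_CMD_FIRST (page_off == 0, i == 0) and word 1 with
 * NVRAM_CMD_LAST (i == len - 4), so the controller sees one complete,
 * properly delimited burst.
 */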
3538 /* offset and length are dword aligned */
3539 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3541 int ret;
3543 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3544 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3545 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3546 udelay(40);
3549 if (!tg3_flag(tp, NVRAM)) {
3550 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3551 } else {
3552 u32 grc_mode;
3554 ret = tg3_nvram_lock(tp);
3555 if (ret)
3556 return ret;
3558 tg3_enable_nvram_access(tp);
3559 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3560 tw32(NVRAM_WRITE1, 0x406);
3562 grc_mode = tr32(GRC_MODE);
3563 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3565 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3566 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3567 buf);
3568 } else {
3569 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3570 buf);
3573 grc_mode = tr32(GRC_MODE);
3574 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3576 tg3_disable_nvram_access(tp);
3577 tg3_nvram_unlock(tp);
3580 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3581 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3582 udelay(40);
3585 return ret;
3588 #define RX_CPU_SCRATCH_BASE 0x30000
3589 #define RX_CPU_SCRATCH_SIZE 0x04000
3590 #define TX_CPU_SCRATCH_BASE 0x34000
3591 #define TX_CPU_SCRATCH_SIZE 0x04000
3593 /* tp->lock is held. */
3594 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3596 int i;
3597 const int iters = 10000;
3599 for (i = 0; i < iters; i++) {
3600 tw32(cpu_base + CPU_STATE, 0xffffffff);
3601 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3602 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3603 break;
3604 if (pci_channel_offline(tp->pdev))
3605 return -EBUSY;
3608 return (i == iters) ? -EBUSY : 0;
3611 /* tp->lock is held. */
3612 static int tg3_rxcpu_pause(struct tg3 *tp)
3614 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3616 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3617 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3618 udelay(10);
3620 return rc;
3623 /* tp->lock is held. */
3624 static int tg3_txcpu_pause(struct tg3 *tp)
3626 return tg3_pause_cpu(tp, TX_CPU_BASE);
3629 /* tp->lock is held. */
3630 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3632 tw32(cpu_base + CPU_STATE, 0xffffffff);
3633 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3636 /* tp->lock is held. */
3637 static void tg3_rxcpu_resume(struct tg3 *tp)
3639 tg3_resume_cpu(tp, RX_CPU_BASE);
3642 /* tp->lock is held. */
3643 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3645 int rc;
3647 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3649 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3650 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3652 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3653 return 0;
3655 if (cpu_base == RX_CPU_BASE) {
3656 rc = tg3_rxcpu_pause(tp);
3657 } else {
3658 /*
3659 * There is only an Rx CPU for the 5750 derivative in the
3660 * BCM4785.
3661 */
3662 if (tg3_flag(tp, IS_SSB_CORE))
3663 return 0;
3665 rc = tg3_txcpu_pause(tp);
3668 if (rc) {
3669 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3670 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3671 return -ENODEV;
3674 /* Clear firmware's nvram arbitration. */
3675 if (tg3_flag(tp, NVRAM))
3676 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3677 return 0;
3680 static int tg3_fw_data_len(struct tg3 *tp,
3681 const struct tg3_firmware_hdr *fw_hdr)
3683 int fw_len;
3685 /* Non-fragmented firmware has one firmware header followed by a
3686 * contiguous chunk of data to be written. The length field in that
3687 * header is not the length of data to be written but the complete
3688 * length of the bss. The data length is determined from
3689 * tp->fw->size minus headers.
3690 *
3691 * Fragmented firmware has a main header followed by multiple
3692 * fragments. Each fragment is identical to non-fragmented firmware
3693 * with a firmware header followed by a contiguous chunk of data. In
3694 * the main header, the length field is unused and set to 0xffffffff.
3695 * In each fragment header the length is the entire size of that
3696 * fragment, i.e. fragment data + header length. The data length is
3697 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3698 */
3699 if (tp->fw_len == 0xffffffff)
3700 fw_len = be32_to_cpu(fw_hdr->len);
3701 else
3702 fw_len = tp->fw->size;
3704 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3705 }
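/*
 * Worked example (hypothetical fragment, assuming TG3_FW_HDR_LEN covers
 * the three-word version/base_addr/len header, i.e. 12 bytes): a
 * fragment whose len field reads 0x10c carries (0x10c - 12) / 4 == 64
 * data words after its header.
 */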
3707 /* tp->lock is held. */
3708 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3709 u32 cpu_scratch_base, int cpu_scratch_size,
3710 const struct tg3_firmware_hdr *fw_hdr)
3712 int err, i;
3713 void (*write_op)(struct tg3 *, u32, u32);
3714 int total_len = tp->fw->size;
3716 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3717 netdev_err(tp->dev,
3718 "%s: Trying to load TX cpu firmware which is 5705\n",
3719 __func__);
3720 return -EINVAL;
3723 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3724 write_op = tg3_write_mem;
3725 else
3726 write_op = tg3_write_indirect_reg32;
3728 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3729 /* It is possible that bootcode is still loading at this point.
3730 * Get the nvram lock first before halting the cpu.
3731 */
3732 int lock_err = tg3_nvram_lock(tp);
3733 err = tg3_halt_cpu(tp, cpu_base);
3734 if (!lock_err)
3735 tg3_nvram_unlock(tp);
3736 if (err)
3737 goto out;
3739 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3740 write_op(tp, cpu_scratch_base + i, 0);
3741 tw32(cpu_base + CPU_STATE, 0xffffffff);
3742 tw32(cpu_base + CPU_MODE,
3743 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3744 } else {
3745 /* Subtract additional main header for fragmented firmware and
3746 * advance to the first fragment.
3747 */
3748 total_len -= TG3_FW_HDR_LEN;
3749 fw_hdr++;
3752 do {
3753 u32 *fw_data = (u32 *)(fw_hdr + 1);
3754 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3755 write_op(tp, cpu_scratch_base +
3756 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3757 (i * sizeof(u32)),
3758 be32_to_cpu(fw_data[i]));
3760 total_len -= be32_to_cpu(fw_hdr->len);
3762 /* Advance to next fragment */
3763 fw_hdr = (struct tg3_firmware_hdr *)
3764 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3765 } while (total_len > 0);
3767 err = 0;
3769 out:
3770 return err;
3773 /* tp->lock is held. */
3774 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3776 int i;
3777 const int iters = 5;
3779 tw32(cpu_base + CPU_STATE, 0xffffffff);
3780 tw32_f(cpu_base + CPU_PC, pc);
3782 for (i = 0; i < iters; i++) {
3783 if (tr32(cpu_base + CPU_PC) == pc)
3784 break;
3785 tw32(cpu_base + CPU_STATE, 0xffffffff);
3786 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3787 tw32_f(cpu_base + CPU_PC, pc);
3788 udelay(1000);
3791 return (i == iters) ? -EBUSY : 0;
3794 /* tp->lock is held. */
3795 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3797 const struct tg3_firmware_hdr *fw_hdr;
3798 int err;
3800 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3802 /* Firmware blob starts with version numbers, followed by
3803 start address and length. We are setting complete length.
3804 length = end_address_of_bss - start_address_of_text.
3805 Remainder is the blob to be loaded contiguously
3806 from start address. */
3808 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3809 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3810 fw_hdr);
3811 if (err)
3812 return err;
3814 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3815 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3816 fw_hdr);
3817 if (err)
3818 return err;
3820 /* Now start up only the RX CPU. */
3821 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3822 be32_to_cpu(fw_hdr->base_addr));
3823 if (err) {
3824 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3825 "should be %08x\n", __func__,
3826 tr32(RX_CPU_BASE + CPU_PC),
3827 be32_to_cpu(fw_hdr->base_addr));
3828 return -ENODEV;
3831 tg3_rxcpu_resume(tp);
3833 return 0;
3836 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3838 const int iters = 1000;
3839 int i;
3840 u32 val;
3842 /* Wait for boot code to complete initialization and enter service
3843 * loop. It is then safe to download service patches.
3844 */
3845 for (i = 0; i < iters; i++) {
3846 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3847 break;
3849 udelay(10);
3852 if (i == iters) {
3853 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3854 return -EBUSY;
3857 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3858 if (val & 0xff) {
3859 netdev_warn(tp->dev,
3860 "Other patches exist. Not downloading EEE patch\n");
3861 return -EEXIST;
3864 return 0;
3867 /* tp->lock is held. */
3868 static void tg3_load_57766_firmware(struct tg3 *tp)
3870 struct tg3_firmware_hdr *fw_hdr;
3872 if (!tg3_flag(tp, NO_NVRAM))
3873 return;
3875 if (tg3_validate_rxcpu_state(tp))
3876 return;
3878 if (!tp->fw)
3879 return;
3881 /* This firmware blob has a different format than older firmware
3882 * releases as given below. The main difference is we have fragmented
3883 * data to be written to non-contiguous locations.
3885 * In the beginning we have a firmware header identical to other
3886 * firmware which consists of version, base addr and length. The length
3887 * here is unused and set to 0xffffffff.
3889 * This is followed by a series of firmware fragments, which are
3890 * individually identical to previous firmware, i.e. they have a
3891 * firmware header followed by data for that fragment. The version
3892 * field of the individual fragment header is unused.
3893 */
3895 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3896 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3897 return;
3899 if (tg3_rxcpu_pause(tp))
3900 return;
3902 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3903 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3905 tg3_rxcpu_resume(tp);
3908 /* tp->lock is held. */
3909 static int tg3_load_tso_firmware(struct tg3 *tp)
3911 const struct tg3_firmware_hdr *fw_hdr;
3912 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3913 int err;
3915 if (!tg3_flag(tp, FW_TSO))
3916 return 0;
3918 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3920 /* Firmware blob starts with version numbers, followed by
3921 start address and length. We are setting complete length.
3922 length = end_address_of_bss - start_address_of_text.
3923 Remainder is the blob to be loaded contiguously
3924 from start address. */
3926 cpu_scratch_size = tp->fw_len;
3928 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3929 cpu_base = RX_CPU_BASE;
3930 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3931 } else {
3932 cpu_base = TX_CPU_BASE;
3933 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3934 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3937 err = tg3_load_firmware_cpu(tp, cpu_base,
3938 cpu_scratch_base, cpu_scratch_size,
3939 fw_hdr);
3940 if (err)
3941 return err;
3943 /* Now start up the CPU. */
3944 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3945 be32_to_cpu(fw_hdr->base_addr));
3946 if (err) {
3947 netdev_err(tp->dev,
3948 "%s fails to set CPU PC, is %08x should be %08x\n",
3949 __func__, tr32(cpu_base + CPU_PC),
3950 be32_to_cpu(fw_hdr->base_addr));
3951 return -ENODEV;
3954 tg3_resume_cpu(tp, cpu_base);
3955 return 0;
3958 /* tp->lock is held. */
3959 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3961 u32 addr_high, addr_low;
3963 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3964 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3965 (mac_addr[4] << 8) | mac_addr[5]);
3967 if (index < 4) {
3968 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3969 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3970 } else {
3971 index -= 4;
3972 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3973 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3974 }
3975 }
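/*
 * Illustration (hypothetical address 00:10:18:aa:bb:cc):
 *
 *	addr_high = (0x00 << 8) | 0x10;			== 0x0010
 *	addr_low  = (0x18 << 24) | (0xaa << 16) |
 *		    (0xbb << 8) | 0xcc;			== 0x18aabbcc
 *
 * i.e. the first two octets land in the HIGH register and the remaining
 * four in the LOW register, preserving on-wire byte order.
 */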
3977 /* tp->lock is held. */
3978 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3980 u32 addr_high;
3981 int i;
3983 for (i = 0; i < 4; i++) {
3984 if (i == 1 && skip_mac_1)
3985 continue;
3986 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3989 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3990 tg3_asic_rev(tp) == ASIC_REV_5704) {
3991 for (i = 4; i < 16; i++)
3992 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3995 addr_high = (tp->dev->dev_addr[0] +
3996 tp->dev->dev_addr[1] +
3997 tp->dev->dev_addr[2] +
3998 tp->dev->dev_addr[3] +
3999 tp->dev->dev_addr[4] +
4000 tp->dev->dev_addr[5]) &
4001 TX_BACKOFF_SEED_MASK;
4002 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4003 }
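/*
 * Illustration of the backoff seed (reusing the hypothetical address
 * 00:10:18:aa:bb:cc): the byte sum is 0x259, and the mask keeps only
 * its low-order bits, so each MAC derives a distinct, bounded seed for
 * the transmit backoff pseudo-random generator.
 */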
4005 static void tg3_enable_register_access(struct tg3 *tp)
4006 {
4007 /*
4008 * Make sure register accesses (indirect or otherwise) will function
4009 * correctly.
4010 */
4011 pci_write_config_dword(tp->pdev,
4012 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4015 static int tg3_power_up(struct tg3 *tp)
4017 int err;
4019 tg3_enable_register_access(tp);
4021 err = pci_set_power_state(tp->pdev, PCI_D0);
4022 if (!err) {
4023 /* Switch out of Vaux if it is a NIC */
4024 tg3_pwrsrc_switch_to_vmain(tp);
4025 } else {
4026 netdev_err(tp->dev, "Transition to D0 failed\n");
4029 return err;
4032 static int tg3_setup_phy(struct tg3 *, bool);
4034 static int tg3_power_down_prepare(struct tg3 *tp)
4036 u32 misc_host_ctrl;
4037 bool device_should_wake, do_low_power;
4039 tg3_enable_register_access(tp);
4041 /* Restore the CLKREQ setting. */
4042 if (tg3_flag(tp, CLKREQ_BUG))
4043 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4044 PCI_EXP_LNKCTL_CLKREQ_EN);
4046 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4047 tw32(TG3PCI_MISC_HOST_CTRL,
4048 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4050 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4051 tg3_flag(tp, WOL_ENABLE);
4053 if (tg3_flag(tp, USE_PHYLIB)) {
4054 do_low_power = false;
4055 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4056 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4057 struct phy_device *phydev;
4058 u32 phyid, advertising;
4060 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4062 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4064 tp->link_config.speed = phydev->speed;
4065 tp->link_config.duplex = phydev->duplex;
4066 tp->link_config.autoneg = phydev->autoneg;
4067 tp->link_config.advertising = phydev->advertising;
4069 advertising = ADVERTISED_TP |
4070 ADVERTISED_Pause |
4071 ADVERTISED_Autoneg |
4072 ADVERTISED_10baseT_Half;
4074 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4075 if (tg3_flag(tp, WOL_SPEED_100MB))
4076 advertising |=
4077 ADVERTISED_100baseT_Half |
4078 ADVERTISED_100baseT_Full |
4079 ADVERTISED_10baseT_Full;
4080 else
4081 advertising |= ADVERTISED_10baseT_Full;
4084 phydev->advertising = advertising;
4086 phy_start_aneg(phydev);
4088 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4089 if (phyid != PHY_ID_BCMAC131) {
4090 phyid &= PHY_BCM_OUI_MASK;
4091 if (phyid == PHY_BCM_OUI_1 ||
4092 phyid == PHY_BCM_OUI_2 ||
4093 phyid == PHY_BCM_OUI_3)
4094 do_low_power = true;
4097 } else {
4098 do_low_power = true;
4100 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4101 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4103 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4104 tg3_setup_phy(tp, false);
4107 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4108 u32 val;
4110 val = tr32(GRC_VCPU_EXT_CTRL);
4111 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4112 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4113 int i;
4114 u32 val;
4116 for (i = 0; i < 200; i++) {
4117 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4118 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4119 break;
4120 msleep(1);
4123 if (tg3_flag(tp, WOL_CAP))
4124 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4125 WOL_DRV_STATE_SHUTDOWN |
4126 WOL_DRV_WOL |
4127 WOL_SET_MAGIC_PKT);
4129 if (device_should_wake) {
4130 u32 mac_mode;
4132 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4133 if (do_low_power &&
4134 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4135 tg3_phy_auxctl_write(tp,
4136 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4137 MII_TG3_AUXCTL_PCTL_WOL_EN |
4138 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4139 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4140 udelay(40);
4143 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4144 mac_mode = MAC_MODE_PORT_MODE_GMII;
4145 else if (tp->phy_flags &
4146 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4147 if (tp->link_config.active_speed == SPEED_1000)
4148 mac_mode = MAC_MODE_PORT_MODE_GMII;
4149 else
4150 mac_mode = MAC_MODE_PORT_MODE_MII;
4151 } else
4152 mac_mode = MAC_MODE_PORT_MODE_MII;
4154 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4155 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4156 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4157 SPEED_100 : SPEED_10;
4158 if (tg3_5700_link_polarity(tp, speed))
4159 mac_mode |= MAC_MODE_LINK_POLARITY;
4160 else
4161 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4163 } else {
4164 mac_mode = MAC_MODE_PORT_MODE_TBI;
4167 if (!tg3_flag(tp, 5750_PLUS))
4168 tw32(MAC_LED_CTRL, tp->led_ctrl);
4170 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4171 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4172 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4173 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4175 if (tg3_flag(tp, ENABLE_APE))
4176 mac_mode |= MAC_MODE_APE_TX_EN |
4177 MAC_MODE_APE_RX_EN |
4178 MAC_MODE_TDE_ENABLE;
4180 tw32_f(MAC_MODE, mac_mode);
4181 udelay(100);
4183 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4184 udelay(10);
4187 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4188 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4189 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4190 u32 base_val;
4192 base_val = tp->pci_clock_ctrl;
4193 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4194 CLOCK_CTRL_TXCLK_DISABLE);
4196 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4197 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4198 } else if (tg3_flag(tp, 5780_CLASS) ||
4199 tg3_flag(tp, CPMU_PRESENT) ||
4200 tg3_asic_rev(tp) == ASIC_REV_5906) {
4201 /* do nothing */
4202 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4203 u32 newbits1, newbits2;
4205 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4206 tg3_asic_rev(tp) == ASIC_REV_5701) {
4207 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4208 CLOCK_CTRL_TXCLK_DISABLE |
4209 CLOCK_CTRL_ALTCLK);
4210 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4211 } else if (tg3_flag(tp, 5705_PLUS)) {
4212 newbits1 = CLOCK_CTRL_625_CORE;
4213 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4214 } else {
4215 newbits1 = CLOCK_CTRL_ALTCLK;
4216 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4220 40);
4222 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4223 40);
4225 if (!tg3_flag(tp, 5705_PLUS)) {
4226 u32 newbits3;
4228 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4229 tg3_asic_rev(tp) == ASIC_REV_5701) {
4230 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4231 CLOCK_CTRL_TXCLK_DISABLE |
4232 CLOCK_CTRL_44MHZ_CORE);
4233 } else {
4234 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4237 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4238 tp->pci_clock_ctrl | newbits3, 40);
4242 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4243 tg3_power_down_phy(tp, do_low_power);
4245 tg3_frob_aux_power(tp, true);
4247 /* Workaround for unstable PLL clock */
4248 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4249 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4250 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4251 u32 val = tr32(0x7d00);
4253 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4254 tw32(0x7d00, val);
4255 if (!tg3_flag(tp, ENABLE_ASF)) {
4256 int err;
4258 err = tg3_nvram_lock(tp);
4259 tg3_halt_cpu(tp, RX_CPU_BASE);
4260 if (!err)
4261 tg3_nvram_unlock(tp);
4265 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4267 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4269 return 0;
4272 static void tg3_power_down(struct tg3 *tp)
4274 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4275 pci_set_power_state(tp->pdev, PCI_D3hot);
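/* Decode the PHY's auxiliary status register into a speed/duplex
 * pair.  FET-style PHYs encode the result differently and are
 * handled in the default case below.
 */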
4278 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4280 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4281 case MII_TG3_AUX_STAT_10HALF:
4282 *speed = SPEED_10;
4283 *duplex = DUPLEX_HALF;
4284 break;
4286 case MII_TG3_AUX_STAT_10FULL:
4287 *speed = SPEED_10;
4288 *duplex = DUPLEX_FULL;
4289 break;
4291 case MII_TG3_AUX_STAT_100HALF:
4292 *speed = SPEED_100;
4293 *duplex = DUPLEX_HALF;
4294 break;
4296 case MII_TG3_AUX_STAT_100FULL:
4297 *speed = SPEED_100;
4298 *duplex = DUPLEX_FULL;
4299 break;
4301 case MII_TG3_AUX_STAT_1000HALF:
4302 *speed = SPEED_1000;
4303 *duplex = DUPLEX_HALF;
4304 break;
4306 case MII_TG3_AUX_STAT_1000FULL:
4307 *speed = SPEED_1000;
4308 *duplex = DUPLEX_FULL;
4309 break;
4311 default:
4312 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4313 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4314 SPEED_10;
4315 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4316 DUPLEX_HALF;
4317 break;
4319 *speed = SPEED_UNKNOWN;
4320 *duplex = DUPLEX_UNKNOWN;
4321 break;
4325 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4327 int err = 0;
4328 u32 val, new_adv;
4330 new_adv = ADVERTISE_CSMA;
4331 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4332 new_adv |= mii_advertise_flowctrl(flowctrl);
4334 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4335 if (err)
4336 goto done;
4338 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4339 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
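/* 5701 A0/B0 parts have the PHY forced to act as master in
 * 1000BASE-T mode; this looks like an erratum workaround, though
 * the code carries no explanation.
 */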
4341 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4342 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4343 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4345 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4346 if (err)
4347 goto done;
4350 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4351 goto done;
4353 tw32(TG3_CPMU_EEE_MODE,
4354 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4356 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4357 if (!err) {
4358 u32 err2;
4360 val = 0;
4361 /* Advertise 100-BaseTX EEE ability */
4362 if (advertise & ADVERTISED_100baseT_Full)
4363 val |= MDIO_AN_EEE_ADV_100TX;
4364 /* Advertise 1000-BaseT EEE ability */
4365 if (advertise & ADVERTISED_1000baseT_Full)
4366 val |= MDIO_AN_EEE_ADV_1000T;
4368 if (!tp->eee.eee_enabled) {
4369 val = 0;
4370 tp->eee.advertised = 0;
4371 } else {
4372 tp->eee.advertised = advertise &
4373 (ADVERTISED_100baseT_Full |
4374 ADVERTISED_1000baseT_Full);
4377 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4378 if (err)
4379 val = 0;
4381 switch (tg3_asic_rev(tp)) {
4382 case ASIC_REV_5717:
4383 case ASIC_REV_57765:
4384 case ASIC_REV_57766:
4385 case ASIC_REV_5719:
4386 /* If we advertised any EEE abilities above... */
4387 if (val)
4388 val = MII_TG3_DSP_TAP26_ALNOKO |
4389 MII_TG3_DSP_TAP26_RMRXSTO |
4390 MII_TG3_DSP_TAP26_OPCSINPT;
4391 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4392 /* Fall through */
4393 case ASIC_REV_5720:
4394 case ASIC_REV_5762:
4395 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4396 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4397 MII_TG3_DSP_CH34TP2_HIBW01);
4400 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4401 if (!err)
4402 err = err2;
4405 done:
4406 return err;
4409 static void tg3_phy_copper_begin(struct tg3 *tp)
4411 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4412 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4413 u32 adv, fc;
4415 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4416 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4417 adv = ADVERTISED_10baseT_Half |
4418 ADVERTISED_10baseT_Full;
4419 if (tg3_flag(tp, WOL_SPEED_100MB))
4420 adv |= ADVERTISED_100baseT_Half |
4421 ADVERTISED_100baseT_Full;
4422 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4423 if (!(tp->phy_flags &
4424 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4425 adv |= ADVERTISED_1000baseT_Half;
4426 adv |= ADVERTISED_1000baseT_Full;
4429 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4430 } else {
4431 adv = tp->link_config.advertising;
4432 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4433 adv &= ~(ADVERTISED_1000baseT_Half |
4434 ADVERTISED_1000baseT_Full);
4436 fc = tp->link_config.flowctrl;
4439 tg3_phy_autoneg_cfg(tp, adv, fc);
4441 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4442 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4443 /* Normally during power down we want to autonegotiate
4444 * the lowest possible speed for WOL. However, to avoid
4445 * link flap, we leave it untouched.
4446 */
4447 return;
4450 tg3_writephy(tp, MII_BMCR,
4451 BMCR_ANENABLE | BMCR_ANRESTART);
4452 } else {
4453 int i;
4454 u32 bmcr, orig_bmcr;
4456 tp->link_config.active_speed = tp->link_config.speed;
4457 tp->link_config.active_duplex = tp->link_config.duplex;
4459 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4460 /* With autoneg disabled, 5715 only links up when the
4461 * advertisement register has the configured speed
4462 * enabled.
4463 */
4464 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4467 bmcr = 0;
4468 switch (tp->link_config.speed) {
4469 default:
4470 case SPEED_10:
4471 break;
4473 case SPEED_100:
4474 bmcr |= BMCR_SPEED100;
4475 break;
4477 case SPEED_1000:
4478 bmcr |= BMCR_SPEED1000;
4479 break;
4482 if (tp->link_config.duplex == DUPLEX_FULL)
4483 bmcr |= BMCR_FULLDPLX;
4485 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4486 (bmcr != orig_bmcr)) {
4487 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4488 for (i = 0; i < 1500; i++) {
4489 u32 tmp;
4491 udelay(10);
4492 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4493 tg3_readphy(tp, MII_BMSR, &tmp))
4494 continue;
4495 if (!(tmp & BMSR_LSTATUS)) {
4496 udelay(40);
4497 break;
4500 tg3_writephy(tp, MII_BMCR, bmcr);
4501 udelay(40);
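/* Populate tp->link_config from whatever the PHY is currently
 * programmed to do, so an existing link setup can be adopted
 * without forcing a renegotiation.
 */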
4506 static int tg3_phy_pull_config(struct tg3 *tp)
4508 int err;
4509 u32 val;
4511 err = tg3_readphy(tp, MII_BMCR, &val);
4512 if (err)
4513 goto done;
4515 if (!(val & BMCR_ANENABLE)) {
4516 tp->link_config.autoneg = AUTONEG_DISABLE;
4517 tp->link_config.advertising = 0;
4518 tg3_flag_clear(tp, PAUSE_AUTONEG);
4520 err = -EIO;
4522 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4523 case 0:
4524 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4525 goto done;
4527 tp->link_config.speed = SPEED_10;
4528 break;
4529 case BMCR_SPEED100:
4530 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4531 goto done;
4533 tp->link_config.speed = SPEED_100;
4534 break;
4535 case BMCR_SPEED1000:
4536 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4537 tp->link_config.speed = SPEED_1000;
4538 break;
4540 /* Fall through */
4541 default:
4542 goto done;
4545 if (val & BMCR_FULLDPLX)
4546 tp->link_config.duplex = DUPLEX_FULL;
4547 else
4548 tp->link_config.duplex = DUPLEX_HALF;
4550 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4552 err = 0;
4553 goto done;
4556 tp->link_config.autoneg = AUTONEG_ENABLE;
4557 tp->link_config.advertising = ADVERTISED_Autoneg;
4558 tg3_flag_set(tp, PAUSE_AUTONEG);
4560 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4561 u32 adv;
4563 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4564 if (err)
4565 goto done;
4567 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4568 tp->link_config.advertising |= adv | ADVERTISED_TP;
4570 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4571 } else {
4572 tp->link_config.advertising |= ADVERTISED_FIBRE;
4575 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4576 u32 adv;
4578 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4579 err = tg3_readphy(tp, MII_CTRL1000, &val);
4580 if (err)
4581 goto done;
4583 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4584 } else {
4585 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4586 if (err)
4587 goto done;
4589 adv = tg3_decode_flowctrl_1000X(val);
4590 tp->link_config.flowctrl = adv;
4592 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4593 adv = mii_adv_to_ethtool_adv_x(val);
4596 tp->link_config.advertising |= adv;
4599 done:
4600 return err;
4603 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4605 int err;
4607 /* Turn off tap power management. */
4608 /* Set Extended packet length bit */
4609 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4611 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4612 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4613 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4614 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4615 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4617 udelay(40);
4619 return err;
4622 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4624 struct ethtool_eee eee;
4626 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4627 return true;
4629 tg3_eee_pull_config(tp, &eee);
4631 if (tp->eee.eee_enabled) {
4632 if (tp->eee.advertised != eee.advertised ||
4633 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4634 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4635 return false;
4636 } else {
4637 /* EEE is disabled but we're advertising */
4638 if (eee.advertised)
4639 return false;
4642 return true;
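/* Check that the PHY's advertisement registers (MII_ADVERTISE and,
 * on gigabit-capable parts, MII_CTRL1000) still match what the
 * driver intended to advertise.
 */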
4645 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4647 u32 advmsk, tgtadv, advertising;
4649 advertising = tp->link_config.advertising;
4650 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4652 advmsk = ADVERTISE_ALL;
4653 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4654 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4655 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4658 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4659 return false;
4661 if ((*lcladv & advmsk) != tgtadv)
4662 return false;
4664 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4665 u32 tg3_ctrl;
4667 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4669 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4670 return false;
4672 if (tgtadv &&
4673 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4674 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4675 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4677 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4678 } else {
4679 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4682 if (tg3_ctrl != tgtadv)
4683 return false;
4686 return true;
4689 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4691 u32 lpeth = 0;
4693 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4694 u32 val;
4696 if (tg3_readphy(tp, MII_STAT1000, &val))
4697 return false;
4699 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4702 if (tg3_readphy(tp, MII_LPA, rmtadv))
4703 return false;
4705 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4706 tp->link_config.rmt_adv = lpeth;
4708 return true;
4711 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4713 if (curr_link_up != tp->link_up) {
4714 if (curr_link_up) {
4715 netif_carrier_on(tp->dev);
4716 } else {
4717 netif_carrier_off(tp->dev);
4718 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4719 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4722 tg3_link_report(tp);
4723 return true;
4726 return false;
4729 static void tg3_clear_mac_status(struct tg3 *tp)
4731 tw32(MAC_EVENT, 0);
4733 tw32_f(MAC_STATUS,
4734 MAC_STATUS_SYNC_CHANGED |
4735 MAC_STATUS_CFG_CHANGED |
4736 MAC_STATUS_MI_COMPLETION |
4737 MAC_STATUS_LNKSTATE_CHANGED);
4738 udelay(40);
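/* Program the CPMU EEE machinery from tp->eee: link-idle detection,
 * the LPI entry/exit debounce timers and the master EEE enable.
 */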
4741 static void tg3_setup_eee(struct tg3 *tp)
4743 u32 val;
4745 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4746 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4747 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4748 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4750 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4752 tw32_f(TG3_CPMU_EEE_CTRL,
4753 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4755 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4756 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4757 TG3_CPMU_EEEMD_LPI_IN_RX |
4758 TG3_CPMU_EEEMD_EEE_ENABLE;
4760 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4761 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4763 if (tg3_flag(tp, ENABLE_APE))
4764 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4766 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4768 tw32_f(TG3_CPMU_EEE_DBTMR1,
4769 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4770 (tp->eee.tx_lpi_timer & 0xffff));
4772 tw32_f(TG3_CPMU_EEE_DBTMR2,
4773 TG3_CPMU_DBTMR2_APE_TX_2047US |
4774 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4777 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4779 bool current_link_up;
4780 u32 bmsr, val;
4781 u32 lcl_adv, rmt_adv;
4782 u16 current_speed;
4783 u8 current_duplex;
4784 int i, err;
4786 tg3_clear_mac_status(tp);
4788 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4789 tw32_f(MAC_MI_MODE,
4790 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4791 udelay(80);
4794 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4796 /* Some third-party PHYs need to be reset on link going
4797 * down.
4798 */
4799 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4800 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4801 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4802 tp->link_up) {
4803 tg3_readphy(tp, MII_BMSR, &bmsr);
4804 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4805 !(bmsr & BMSR_LSTATUS))
4806 force_reset = true;
4808 if (force_reset)
4809 tg3_phy_reset(tp);
4811 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4812 tg3_readphy(tp, MII_BMSR, &bmsr);
4813 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4814 !tg3_flag(tp, INIT_COMPLETE))
4815 bmsr = 0;
4817 if (!(bmsr & BMSR_LSTATUS)) {
4818 err = tg3_init_5401phy_dsp(tp);
4819 if (err)
4820 return err;
4822 tg3_readphy(tp, MII_BMSR, &bmsr);
4823 for (i = 0; i < 1000; i++) {
4824 udelay(10);
4825 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4826 (bmsr & BMSR_LSTATUS)) {
4827 udelay(40);
4828 break;
4832 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4833 TG3_PHY_REV_BCM5401_B0 &&
4834 !(bmsr & BMSR_LSTATUS) &&
4835 tp->link_config.active_speed == SPEED_1000) {
4836 err = tg3_phy_reset(tp);
4837 if (!err)
4838 err = tg3_init_5401phy_dsp(tp);
4839 if (err)
4840 return err;
4843 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4844 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4845 /* 5701 {A0,B0} CRC bug workaround */
4846 tg3_writephy(tp, 0x15, 0x0a75);
4847 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4848 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4849 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4852 /* Clear pending interrupts... */
4853 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4854 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4856 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4857 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4858 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4859 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4861 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4862 tg3_asic_rev(tp) == ASIC_REV_5701) {
4863 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4864 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4865 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4866 else
4867 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4870 current_link_up = false;
4871 current_speed = SPEED_UNKNOWN;
4872 current_duplex = DUPLEX_UNKNOWN;
4873 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4874 tp->link_config.rmt_adv = 0;
4876 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4877 err = tg3_phy_auxctl_read(tp,
4878 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4879 &val);
4880 if (!err && !(val & (1 << 10))) {
4881 tg3_phy_auxctl_write(tp,
4882 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4883 val | (1 << 10));
4884 goto relink;
4888 bmsr = 0;
4889 for (i = 0; i < 100; i++) {
4890 tg3_readphy(tp, MII_BMSR, &bmsr);
4891 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4892 (bmsr & BMSR_LSTATUS))
4893 break;
4894 udelay(40);
4897 if (bmsr & BMSR_LSTATUS) {
4898 u32 aux_stat, bmcr;
4900 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4901 for (i = 0; i < 2000; i++) {
4902 udelay(10);
4903 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4904 aux_stat)
4905 break;
4908 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4909 &current_speed,
4910 &current_duplex);
4912 bmcr = 0;
4913 for (i = 0; i < 200; i++) {
4914 tg3_readphy(tp, MII_BMCR, &bmcr);
4915 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4916 continue;
4917 if (bmcr && bmcr != 0x7fff)
4918 break;
4919 udelay(10);
4922 lcl_adv = 0;
4923 rmt_adv = 0;
4925 tp->link_config.active_speed = current_speed;
4926 tp->link_config.active_duplex = current_duplex;
4928 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4929 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4931 if ((bmcr & BMCR_ANENABLE) &&
4932 eee_config_ok &&
4933 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4934 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4935 current_link_up = true;
4937 /* Changes to EEE settings take effect only after a PHY
4938 * reset. If we have skipped a reset due to Link Flap
4939 * Avoidance being enabled, do it now.
4940 */
4941 if (!eee_config_ok &&
4942 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4943 !force_reset) {
4944 tg3_setup_eee(tp);
4945 tg3_phy_reset(tp);
4947 } else {
4948 if (!(bmcr & BMCR_ANENABLE) &&
4949 tp->link_config.speed == current_speed &&
4950 tp->link_config.duplex == current_duplex) {
4951 current_link_up = true;
4955 if (current_link_up &&
4956 tp->link_config.active_duplex == DUPLEX_FULL) {
4957 u32 reg, bit;
4959 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4960 reg = MII_TG3_FET_GEN_STAT;
4961 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4962 } else {
4963 reg = MII_TG3_EXT_STAT;
4964 bit = MII_TG3_EXT_STAT_MDIX;
4967 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4968 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4970 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4974 relink:
4975 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4976 tg3_phy_copper_begin(tp);
4978 if (tg3_flag(tp, ROBOSWITCH)) {
4979 current_link_up = true;
4980 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4981 current_speed = SPEED_1000;
4982 current_duplex = DUPLEX_FULL;
4983 tp->link_config.active_speed = current_speed;
4984 tp->link_config.active_duplex = current_duplex;
4987 tg3_readphy(tp, MII_BMSR, &bmsr);
4988 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4989 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4990 current_link_up = true;
4993 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4994 if (current_link_up) {
4995 if (tp->link_config.active_speed == SPEED_100 ||
4996 tp->link_config.active_speed == SPEED_10)
4997 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4998 else
4999 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5000 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5001 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5002 else
5003 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5005 /* In order for the 5750 core in BCM4785 chip to work properly
5006 * in RGMII mode, the Led Control Register must be set up.
5007 */
5008 if (tg3_flag(tp, RGMII_MODE)) {
5009 u32 led_ctrl = tr32(MAC_LED_CTRL);
5010 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5012 if (tp->link_config.active_speed == SPEED_10)
5013 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5014 else if (tp->link_config.active_speed == SPEED_100)
5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016 LED_CTRL_100MBPS_ON);
5017 else if (tp->link_config.active_speed == SPEED_1000)
5018 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5019 LED_CTRL_1000MBPS_ON);
5021 tw32(MAC_LED_CTRL, led_ctrl);
5022 udelay(40);
5025 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5026 if (tp->link_config.active_duplex == DUPLEX_HALF)
5027 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5029 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5030 if (current_link_up &&
5031 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5032 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5033 else
5034 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5037 /* ??? Without this setting Netgear GA302T PHY does not
5038 * ??? send/receive packets...
5039 */
5040 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5041 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5042 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5043 tw32_f(MAC_MI_MODE, tp->mi_mode);
5044 udelay(80);
5047 tw32_f(MAC_MODE, tp->mac_mode);
5048 udelay(40);
5050 tg3_phy_eee_adjust(tp, current_link_up);
5052 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5053 /* Polled via timer. */
5054 tw32_f(MAC_EVENT, 0);
5055 } else {
5056 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5058 udelay(40);
5060 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5061 current_link_up &&
5062 tp->link_config.active_speed == SPEED_1000 &&
5063 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5064 udelay(120);
5065 tw32_f(MAC_STATUS,
5066 (MAC_STATUS_SYNC_CHANGED |
5067 MAC_STATUS_CFG_CHANGED));
5068 udelay(40);
5069 tg3_write_mem(tp,
5070 NIC_SRAM_FIRMWARE_MBOX,
5071 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5074 /* Prevent send BD corruption. */
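/* On parts with the CLKREQ erratum, keep PCIe CLKREQ disabled while
 * the link runs at 10/100; it is restored at other speeds.
 */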
5075 if (tg3_flag(tp, CLKREQ_BUG)) {
5076 if (tp->link_config.active_speed == SPEED_100 ||
5077 tp->link_config.active_speed == SPEED_10)
5078 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5079 PCI_EXP_LNKCTL_CLKREQ_EN);
5080 else
5081 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5082 PCI_EXP_LNKCTL_CLKREQ_EN);
5085 tg3_test_and_report_link_chg(tp, current_link_up);
5087 return 0;
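/* Software 1000BASE-X autonegotiation state machine, used on fiber
 * parts when hardware autoneg is unavailable or disabled.  It is
 * essentially the IEEE 802.3 Clause 37 arbitration state diagram,
 * driven by polling the received config words in MAC_RX_AUTO_NEG.
 */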
5090 struct tg3_fiber_aneginfo {
5091 int state;
5092 #define ANEG_STATE_UNKNOWN 0
5093 #define ANEG_STATE_AN_ENABLE 1
5094 #define ANEG_STATE_RESTART_INIT 2
5095 #define ANEG_STATE_RESTART 3
5096 #define ANEG_STATE_DISABLE_LINK_OK 4
5097 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5098 #define ANEG_STATE_ABILITY_DETECT 6
5099 #define ANEG_STATE_ACK_DETECT_INIT 7
5100 #define ANEG_STATE_ACK_DETECT 8
5101 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5102 #define ANEG_STATE_COMPLETE_ACK 10
5103 #define ANEG_STATE_IDLE_DETECT_INIT 11
5104 #define ANEG_STATE_IDLE_DETECT 12
5105 #define ANEG_STATE_LINK_OK 13
5106 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5107 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5109 u32 flags;
5110 #define MR_AN_ENABLE 0x00000001
5111 #define MR_RESTART_AN 0x00000002
5112 #define MR_AN_COMPLETE 0x00000004
5113 #define MR_PAGE_RX 0x00000008
5114 #define MR_NP_LOADED 0x00000010
5115 #define MR_TOGGLE_TX 0x00000020
5116 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5117 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5118 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5119 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5120 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5121 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5122 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5123 #define MR_TOGGLE_RX 0x00002000
5124 #define MR_NP_RX 0x00004000
5126 #define MR_LINK_OK 0x80000000
5128 unsigned long link_time, cur_time;
5130 u32 ability_match_cfg;
5131 int ability_match_count;
5133 char ability_match, idle_match, ack_match;
5135 u32 txconfig, rxconfig;
5136 #define ANEG_CFG_NP 0x00000080
5137 #define ANEG_CFG_ACK 0x00000040
5138 #define ANEG_CFG_RF2 0x00000020
5139 #define ANEG_CFG_RF1 0x00000010
5140 #define ANEG_CFG_PS2 0x00000001
5141 #define ANEG_CFG_PS1 0x00008000
5142 #define ANEG_CFG_HD 0x00004000
5143 #define ANEG_CFG_FD 0x00002000
5144 #define ANEG_CFG_INVAL 0x00001f06
5147 #define ANEG_OK 0
5148 #define ANEG_DONE 1
5149 #define ANEG_TIMER_ENAB 2
5150 #define ANEG_FAILED -1
5152 #define ANEG_STATE_SETTLE_TIME 10000
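/* Advance the software autoneg state machine by one tick.  Returns
 * ANEG_OK to keep polling, ANEG_TIMER_ENAB while a settle timer is
 * running, ANEG_DONE on completion, or ANEG_FAILED on error.
 */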
5154 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5155 struct tg3_fiber_aneginfo *ap)
5157 u16 flowctrl;
5158 unsigned long delta;
5159 u32 rx_cfg_reg;
5160 int ret;
5162 if (ap->state == ANEG_STATE_UNKNOWN) {
5163 ap->rxconfig = 0;
5164 ap->link_time = 0;
5165 ap->cur_time = 0;
5166 ap->ability_match_cfg = 0;
5167 ap->ability_match_count = 0;
5168 ap->ability_match = 0;
5169 ap->idle_match = 0;
5170 ap->ack_match = 0;
5172 ap->cur_time++;
5174 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5175 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5177 if (rx_cfg_reg != ap->ability_match_cfg) {
5178 ap->ability_match_cfg = rx_cfg_reg;
5179 ap->ability_match = 0;
5180 ap->ability_match_count = 0;
5181 } else {
5182 if (++ap->ability_match_count > 1) {
5183 ap->ability_match = 1;
5184 ap->ability_match_cfg = rx_cfg_reg;
5187 if (rx_cfg_reg & ANEG_CFG_ACK)
5188 ap->ack_match = 1;
5189 else
5190 ap->ack_match = 0;
5192 ap->idle_match = 0;
5193 } else {
5194 ap->idle_match = 1;
5195 ap->ability_match_cfg = 0;
5196 ap->ability_match_count = 0;
5197 ap->ability_match = 0;
5198 ap->ack_match = 0;
5200 rx_cfg_reg = 0;
5203 ap->rxconfig = rx_cfg_reg;
5204 ret = ANEG_OK;
5206 switch (ap->state) {
5207 case ANEG_STATE_UNKNOWN:
5208 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5209 ap->state = ANEG_STATE_AN_ENABLE;
5211 /* fallthru */
5212 case ANEG_STATE_AN_ENABLE:
5213 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5214 if (ap->flags & MR_AN_ENABLE) {
5215 ap->link_time = 0;
5216 ap->cur_time = 0;
5217 ap->ability_match_cfg = 0;
5218 ap->ability_match_count = 0;
5219 ap->ability_match = 0;
5220 ap->idle_match = 0;
5221 ap->ack_match = 0;
5223 ap->state = ANEG_STATE_RESTART_INIT;
5224 } else {
5225 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5227 break;
5229 case ANEG_STATE_RESTART_INIT:
5230 ap->link_time = ap->cur_time;
5231 ap->flags &= ~(MR_NP_LOADED);
5232 ap->txconfig = 0;
5233 tw32(MAC_TX_AUTO_NEG, 0);
5234 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5235 tw32_f(MAC_MODE, tp->mac_mode);
5236 udelay(40);
5238 ret = ANEG_TIMER_ENAB;
5239 ap->state = ANEG_STATE_RESTART;
5241 /* fallthru */
5242 case ANEG_STATE_RESTART:
5243 delta = ap->cur_time - ap->link_time;
5244 if (delta > ANEG_STATE_SETTLE_TIME)
5245 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5246 else
5247 ret = ANEG_TIMER_ENAB;
5248 break;
5250 case ANEG_STATE_DISABLE_LINK_OK:
5251 ret = ANEG_DONE;
5252 break;
5254 case ANEG_STATE_ABILITY_DETECT_INIT:
5255 ap->flags &= ~(MR_TOGGLE_TX);
5256 ap->txconfig = ANEG_CFG_FD;
5257 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5258 if (flowctrl & ADVERTISE_1000XPAUSE)
5259 ap->txconfig |= ANEG_CFG_PS1;
5260 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5261 ap->txconfig |= ANEG_CFG_PS2;
5262 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5263 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5264 tw32_f(MAC_MODE, tp->mac_mode);
5265 udelay(40);
5267 ap->state = ANEG_STATE_ABILITY_DETECT;
5268 break;
5270 case ANEG_STATE_ABILITY_DETECT:
5271 if (ap->ability_match != 0 && ap->rxconfig != 0)
5272 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5273 break;
5275 case ANEG_STATE_ACK_DETECT_INIT:
5276 ap->txconfig |= ANEG_CFG_ACK;
5277 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5278 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5279 tw32_f(MAC_MODE, tp->mac_mode);
5280 udelay(40);
5282 ap->state = ANEG_STATE_ACK_DETECT;
5284 /* fallthru */
5285 case ANEG_STATE_ACK_DETECT:
5286 if (ap->ack_match != 0) {
5287 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5288 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5289 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5290 } else {
5291 ap->state = ANEG_STATE_AN_ENABLE;
5293 } else if (ap->ability_match != 0 &&
5294 ap->rxconfig == 0) {
5295 ap->state = ANEG_STATE_AN_ENABLE;
5297 break;
5299 case ANEG_STATE_COMPLETE_ACK_INIT:
5300 if (ap->rxconfig & ANEG_CFG_INVAL) {
5301 ret = ANEG_FAILED;
5302 break;
5304 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5305 MR_LP_ADV_HALF_DUPLEX |
5306 MR_LP_ADV_SYM_PAUSE |
5307 MR_LP_ADV_ASYM_PAUSE |
5308 MR_LP_ADV_REMOTE_FAULT1 |
5309 MR_LP_ADV_REMOTE_FAULT2 |
5310 MR_LP_ADV_NEXT_PAGE |
5311 MR_TOGGLE_RX |
5312 MR_NP_RX);
5313 if (ap->rxconfig & ANEG_CFG_FD)
5314 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5315 if (ap->rxconfig & ANEG_CFG_HD)
5316 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5317 if (ap->rxconfig & ANEG_CFG_PS1)
5318 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5319 if (ap->rxconfig & ANEG_CFG_PS2)
5320 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5321 if (ap->rxconfig & ANEG_CFG_RF1)
5322 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5323 if (ap->rxconfig & ANEG_CFG_RF2)
5324 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5325 if (ap->rxconfig & ANEG_CFG_NP)
5326 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5328 ap->link_time = ap->cur_time;
5330 ap->flags ^= (MR_TOGGLE_TX);
5331 if (ap->rxconfig & 0x0008)
5332 ap->flags |= MR_TOGGLE_RX;
5333 if (ap->rxconfig & ANEG_CFG_NP)
5334 ap->flags |= MR_NP_RX;
5335 ap->flags |= MR_PAGE_RX;
5337 ap->state = ANEG_STATE_COMPLETE_ACK;
5338 ret = ANEG_TIMER_ENAB;
5339 break;
5341 case ANEG_STATE_COMPLETE_ACK:
5342 if (ap->ability_match != 0 &&
5343 ap->rxconfig == 0) {
5344 ap->state = ANEG_STATE_AN_ENABLE;
5345 break;
5347 delta = ap->cur_time - ap->link_time;
5348 if (delta > ANEG_STATE_SETTLE_TIME) {
5349 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5350 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5351 } else {
5352 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5353 !(ap->flags & MR_NP_RX)) {
5354 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5355 } else {
5356 ret = ANEG_FAILED;
5360 break;
5362 case ANEG_STATE_IDLE_DETECT_INIT:
5363 ap->link_time = ap->cur_time;
5364 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5365 tw32_f(MAC_MODE, tp->mac_mode);
5366 udelay(40);
5368 ap->state = ANEG_STATE_IDLE_DETECT;
5369 ret = ANEG_TIMER_ENAB;
5370 break;
5372 case ANEG_STATE_IDLE_DETECT:
5373 if (ap->ability_match != 0 &&
5374 ap->rxconfig == 0) {
5375 ap->state = ANEG_STATE_AN_ENABLE;
5376 break;
5378 delta = ap->cur_time - ap->link_time;
5379 if (delta > ANEG_STATE_SETTLE_TIME) {
5380 /* XXX another gem from the Broadcom driver :( */
5381 ap->state = ANEG_STATE_LINK_OK;
5383 break;
5385 case ANEG_STATE_LINK_OK:
5386 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5387 ret = ANEG_DONE;
5388 break;
5390 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5391 /* ??? unimplemented */
5392 break;
5394 case ANEG_STATE_NEXT_PAGE_WAIT:
5395 /* ??? unimplemented */
5396 break;
5398 default:
5399 ret = ANEG_FAILED;
5400 break;
5403 return ret;
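/* Drive the state machine above to completion, polling roughly once
 * per microsecond for up to ~195 ms.  Returns nonzero when
 * negotiation completed successfully.
 */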
5406 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5408 int res = 0;
5409 struct tg3_fiber_aneginfo aninfo;
5410 int status = ANEG_FAILED;
5411 unsigned int tick;
5412 u32 tmp;
5414 tw32_f(MAC_TX_AUTO_NEG, 0);
5416 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5417 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5418 udelay(40);
5420 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5421 udelay(40);
5423 memset(&aninfo, 0, sizeof(aninfo));
5424 aninfo.flags |= MR_AN_ENABLE;
5425 aninfo.state = ANEG_STATE_UNKNOWN;
5426 aninfo.cur_time = 0;
5427 tick = 0;
5428 while (++tick < 195000) {
5429 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5430 if (status == ANEG_DONE || status == ANEG_FAILED)
5431 break;
5433 udelay(1);
5436 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5437 tw32_f(MAC_MODE, tp->mac_mode);
5438 udelay(40);
5440 *txflags = aninfo.txconfig;
5441 *rxflags = aninfo.flags;
5443 if (status == ANEG_DONE &&
5444 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5445 MR_LP_ADV_FULL_DUPLEX)))
5446 res = 1;
5448 return res;
5451 static void tg3_init_bcm8002(struct tg3 *tp)
5453 u32 mac_status = tr32(MAC_STATUS);
5454 int i;
5456 /* Reset when initializing for the first time, or when we have a link. */
5457 if (tg3_flag(tp, INIT_COMPLETE) &&
5458 !(mac_status & MAC_STATUS_PCS_SYNCED))
5459 return;
5461 /* Set PLL lock range. */
5462 tg3_writephy(tp, 0x16, 0x8007);
5464 /* SW reset */
5465 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5467 /* Wait for reset to complete. */
5468 /* XXX schedule_timeout() ... */
5469 for (i = 0; i < 500; i++)
5470 udelay(10);
5472 /* Config mode; select PMA/Ch 1 regs. */
5473 tg3_writephy(tp, 0x10, 0x8411);
5475 /* Enable auto-lock and comdet, select txclk for tx. */
5476 tg3_writephy(tp, 0x11, 0x0a10);
5478 tg3_writephy(tp, 0x18, 0x00a0);
5479 tg3_writephy(tp, 0x16, 0x41ff);
5481 /* Assert and deassert POR. */
5482 tg3_writephy(tp, 0x13, 0x0400);
5483 udelay(40);
5484 tg3_writephy(tp, 0x13, 0x0000);
5486 tg3_writephy(tp, 0x11, 0x0a50);
5487 udelay(40);
5488 tg3_writephy(tp, 0x11, 0x0a10);
5490 /* Wait for signal to stabilize */
5491 /* XXX schedule_timeout() ... */
5492 for (i = 0; i < 15000; i++)
5493 udelay(10);
5495 /* Deselect the channel register so we can read the PHYID
5496 * later.
5497 */
5498 tg3_writephy(tp, 0x10, 0x8011);
5501 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5503 u16 flowctrl;
5504 bool current_link_up;
5505 u32 sg_dig_ctrl, sg_dig_status;
5506 u32 serdes_cfg, expected_sg_dig_ctrl;
5507 int workaround, port_a;
5509 serdes_cfg = 0;
5510 expected_sg_dig_ctrl = 0;
5511 workaround = 0;
5512 port_a = 1;
5513 current_link_up = false;
5515 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5516 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5517 workaround = 1;
5518 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5519 port_a = 0;
5521 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5522 /* preserve bits 20-23 for voltage regulator */
5523 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5526 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5528 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5529 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5530 if (workaround) {
5531 u32 val = serdes_cfg;
5533 if (port_a)
5534 val |= 0xc010000;
5535 else
5536 val |= 0x4010000;
5537 tw32_f(MAC_SERDES_CFG, val);
5540 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5542 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5543 tg3_setup_flow_control(tp, 0, 0);
5544 current_link_up = true;
5546 goto out;
5549 /* Want auto-negotiation. */
5550 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5552 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5553 if (flowctrl & ADVERTISE_1000XPAUSE)
5554 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5555 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5556 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5558 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5559 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5560 tp->serdes_counter &&
5561 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5562 MAC_STATUS_RCVD_CFG)) ==
5563 MAC_STATUS_PCS_SYNCED)) {
5564 tp->serdes_counter--;
5565 current_link_up = true;
5566 goto out;
5568 restart_autoneg:
5569 if (workaround)
5570 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5571 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5572 udelay(5);
5573 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5575 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5576 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5577 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5578 MAC_STATUS_SIGNAL_DET)) {
5579 sg_dig_status = tr32(SG_DIG_STATUS);
5580 mac_status = tr32(MAC_STATUS);
5582 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5583 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5584 u32 local_adv = 0, remote_adv = 0;
5586 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5587 local_adv |= ADVERTISE_1000XPAUSE;
5588 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5589 local_adv |= ADVERTISE_1000XPSE_ASYM;
5591 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5592 remote_adv |= LPA_1000XPAUSE;
5593 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5594 remote_adv |= LPA_1000XPAUSE_ASYM;
5596 tp->link_config.rmt_adv =
5597 mii_adv_to_ethtool_adv_x(remote_adv);
5599 tg3_setup_flow_control(tp, local_adv, remote_adv);
5600 current_link_up = true;
5601 tp->serdes_counter = 0;
5602 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5603 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5604 if (tp->serdes_counter)
5605 tp->serdes_counter--;
5606 else {
5607 if (workaround) {
5608 u32 val = serdes_cfg;
5610 if (port_a)
5611 val |= 0xc010000;
5612 else
5613 val |= 0x4010000;
5615 tw32_f(MAC_SERDES_CFG, val);
5618 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5619 udelay(40);
5621 /* Link parallel detection - link is up */
5622 /* only if we have PCS_SYNC and not */
5623 /* receiving config code words */
5624 mac_status = tr32(MAC_STATUS);
5625 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5626 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5627 tg3_setup_flow_control(tp, 0, 0);
5628 current_link_up = true;
5629 tp->phy_flags |=
5630 TG3_PHYFLG_PARALLEL_DETECT;
5631 tp->serdes_counter =
5632 SERDES_PARALLEL_DET_TIMEOUT;
5633 } else
5634 goto restart_autoneg;
5637 } else {
5638 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5639 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5642 out:
5643 return current_link_up;
5646 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5648 bool current_link_up = false;
5650 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5651 goto out;
5653 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5654 u32 txflags, rxflags;
5655 int i;
5657 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5658 u32 local_adv = 0, remote_adv = 0;
5660 if (txflags & ANEG_CFG_PS1)
5661 local_adv |= ADVERTISE_1000XPAUSE;
5662 if (txflags & ANEG_CFG_PS2)
5663 local_adv |= ADVERTISE_1000XPSE_ASYM;
5665 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5666 remote_adv |= LPA_1000XPAUSE;
5667 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5668 remote_adv |= LPA_1000XPAUSE_ASYM;
5670 tp->link_config.rmt_adv =
5671 mii_adv_to_ethtool_adv_x(remote_adv);
5673 tg3_setup_flow_control(tp, local_adv, remote_adv);
5675 current_link_up = true;
5677 for (i = 0; i < 30; i++) {
5678 udelay(20);
5679 tw32_f(MAC_STATUS,
5680 (MAC_STATUS_SYNC_CHANGED |
5681 MAC_STATUS_CFG_CHANGED));
5682 udelay(40);
5683 if ((tr32(MAC_STATUS) &
5684 (MAC_STATUS_SYNC_CHANGED |
5685 MAC_STATUS_CFG_CHANGED)) == 0)
5686 break;
5689 mac_status = tr32(MAC_STATUS);
5690 if (!current_link_up &&
5691 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5692 !(mac_status & MAC_STATUS_RCVD_CFG))
5693 current_link_up = true;
5694 } else {
5695 tg3_setup_flow_control(tp, 0, 0);
5697 /* Forcing 1000FD link up. */
5698 current_link_up = true;
5700 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5701 udelay(40);
5703 tw32_f(MAC_MODE, tp->mac_mode);
5704 udelay(40);
5707 out:
5708 return current_link_up;
5711 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5713 u32 orig_pause_cfg;
5714 u16 orig_active_speed;
5715 u8 orig_active_duplex;
5716 u32 mac_status;
5717 bool current_link_up;
5718 int i;
5720 orig_pause_cfg = tp->link_config.active_flowctrl;
5721 orig_active_speed = tp->link_config.active_speed;
5722 orig_active_duplex = tp->link_config.active_duplex;
5724 if (!tg3_flag(tp, HW_AUTONEG) &&
5725 tp->link_up &&
5726 tg3_flag(tp, INIT_COMPLETE)) {
5727 mac_status = tr32(MAC_STATUS);
5728 mac_status &= (MAC_STATUS_PCS_SYNCED |
5729 MAC_STATUS_SIGNAL_DET |
5730 MAC_STATUS_CFG_CHANGED |
5731 MAC_STATUS_RCVD_CFG);
5732 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5733 MAC_STATUS_SIGNAL_DET)) {
5734 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5735 MAC_STATUS_CFG_CHANGED));
5736 return 0;
5740 tw32_f(MAC_TX_AUTO_NEG, 0);
5742 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5743 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5744 tw32_f(MAC_MODE, tp->mac_mode);
5745 udelay(40);
5747 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5748 tg3_init_bcm8002(tp);
5750 /* Enable link change event even when serdes polling. */
5751 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5752 udelay(40);
5754 current_link_up = false;
5755 tp->link_config.rmt_adv = 0;
5756 mac_status = tr32(MAC_STATUS);
5758 if (tg3_flag(tp, HW_AUTONEG))
5759 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5760 else
5761 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5763 tp->napi[0].hw_status->status =
5764 (SD_STATUS_UPDATED |
5765 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5767 for (i = 0; i < 100; i++) {
5768 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5769 MAC_STATUS_CFG_CHANGED));
5770 udelay(5);
5771 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5772 MAC_STATUS_CFG_CHANGED |
5773 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5774 break;
5777 mac_status = tr32(MAC_STATUS);
5778 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5779 current_link_up = false;
5780 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5781 tp->serdes_counter == 0) {
5782 tw32_f(MAC_MODE, (tp->mac_mode |
5783 MAC_MODE_SEND_CONFIGS));
5784 udelay(1);
5785 tw32_f(MAC_MODE, tp->mac_mode);
5789 if (current_link_up) {
5790 tp->link_config.active_speed = SPEED_1000;
5791 tp->link_config.active_duplex = DUPLEX_FULL;
5792 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5793 LED_CTRL_LNKLED_OVERRIDE |
5794 LED_CTRL_1000MBPS_ON));
5795 } else {
5796 tp->link_config.active_speed = SPEED_UNKNOWN;
5797 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5798 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5799 LED_CTRL_LNKLED_OVERRIDE |
5800 LED_CTRL_TRAFFIC_OVERRIDE));
5803 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5804 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5805 if (orig_pause_cfg != now_pause_cfg ||
5806 orig_active_speed != tp->link_config.active_speed ||
5807 orig_active_duplex != tp->link_config.active_duplex)
5808 tg3_link_report(tp);
5811 return 0;
5814 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5816 int err = 0;
5817 u32 bmsr, bmcr;
5818 u16 current_speed = SPEED_UNKNOWN;
5819 u8 current_duplex = DUPLEX_UNKNOWN;
5820 bool current_link_up = false;
5821 u32 local_adv, remote_adv, sgsr;
5823 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5824 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5825 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5826 (sgsr & SERDES_TG3_SGMII_MODE)) {
5828 if (force_reset)
5829 tg3_phy_reset(tp);
5831 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5833 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835 } else {
5836 current_link_up = true;
5837 if (sgsr & SERDES_TG3_SPEED_1000) {
5838 current_speed = SPEED_1000;
5839 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5840 } else if (sgsr & SERDES_TG3_SPEED_100) {
5841 current_speed = SPEED_100;
5842 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5843 } else {
5844 current_speed = SPEED_10;
5845 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5848 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5849 current_duplex = DUPLEX_FULL;
5850 else
5851 current_duplex = DUPLEX_HALF;
5854 tw32_f(MAC_MODE, tp->mac_mode);
5855 udelay(40);
5857 tg3_clear_mac_status(tp);
5859 goto fiber_setup_done;
5862 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5863 tw32_f(MAC_MODE, tp->mac_mode);
5864 udelay(40);
5866 tg3_clear_mac_status(tp);
5868 if (force_reset)
5869 tg3_phy_reset(tp);
5871 tp->link_config.rmt_adv = 0;
5873 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5874 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5875 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5876 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5877 bmsr |= BMSR_LSTATUS;
5878 else
5879 bmsr &= ~BMSR_LSTATUS;
5882 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5884 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5885 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5886 /* do nothing, just check for link up at the end */
5887 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5888 u32 adv, newadv;
5890 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5891 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5892 ADVERTISE_1000XPAUSE |
5893 ADVERTISE_1000XPSE_ASYM |
5894 ADVERTISE_SLCT);
5896 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5897 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5899 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5900 tg3_writephy(tp, MII_ADVERTISE, newadv);
5901 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5902 tg3_writephy(tp, MII_BMCR, bmcr);
5904 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5905 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5906 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5908 return err;
5910 } else {
5911 u32 new_bmcr;
5913 bmcr &= ~BMCR_SPEED1000;
5914 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5916 if (tp->link_config.duplex == DUPLEX_FULL)
5917 new_bmcr |= BMCR_FULLDPLX;
5919 if (new_bmcr != bmcr) {
5920 /* BMCR_SPEED1000 is a reserved bit that needs
5921 * to be set on write.
5922 */
5923 new_bmcr |= BMCR_SPEED1000;
5925 /* Force a linkdown */
5926 if (tp->link_up) {
5927 u32 adv;
5929 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5930 adv &= ~(ADVERTISE_1000XFULL |
5931 ADVERTISE_1000XHALF |
5932 ADVERTISE_SLCT);
5933 tg3_writephy(tp, MII_ADVERTISE, adv);
5934 tg3_writephy(tp, MII_BMCR, bmcr |
5935 BMCR_ANRESTART |
5936 BMCR_ANENABLE);
5937 udelay(10);
5938 tg3_carrier_off(tp);
5940 tg3_writephy(tp, MII_BMCR, new_bmcr);
5941 bmcr = new_bmcr;
5942 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5943 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5944 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5945 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5946 bmsr |= BMSR_LSTATUS;
5947 else
5948 bmsr &= ~BMSR_LSTATUS;
5950 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5954 if (bmsr & BMSR_LSTATUS) {
5955 current_speed = SPEED_1000;
5956 current_link_up = true;
5957 if (bmcr & BMCR_FULLDPLX)
5958 current_duplex = DUPLEX_FULL;
5959 else
5960 current_duplex = DUPLEX_HALF;
5962 local_adv = 0;
5963 remote_adv = 0;
5965 if (bmcr & BMCR_ANENABLE) {
5966 u32 common;
5968 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5969 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5970 common = local_adv & remote_adv;
5971 if (common & (ADVERTISE_1000XHALF |
5972 ADVERTISE_1000XFULL)) {
5973 if (common & ADVERTISE_1000XFULL)
5974 current_duplex = DUPLEX_FULL;
5975 else
5976 current_duplex = DUPLEX_HALF;
5978 tp->link_config.rmt_adv =
5979 mii_adv_to_ethtool_adv_x(remote_adv);
5980 } else if (!tg3_flag(tp, 5780_CLASS)) {
5981 /* Link is up via parallel detect */
5982 } else {
5983 current_link_up = false;
5988 fiber_setup_done:
5989 if (current_link_up && current_duplex == DUPLEX_FULL)
5990 tg3_setup_flow_control(tp, local_adv, remote_adv);
5992 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5993 if (tp->link_config.active_duplex == DUPLEX_HALF)
5994 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5996 tw32_f(MAC_MODE, tp->mac_mode);
5997 udelay(40);
5999 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6001 tp->link_config.active_speed = current_speed;
6002 tp->link_config.active_duplex = current_duplex;
6004 tg3_test_and_report_link_chg(tp, current_link_up);
6005 return err;
6008 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6010 if (tp->serdes_counter) {
6011 /* Give autoneg time to complete. */
6012 tp->serdes_counter--;
6013 return;
6016 if (!tp->link_up &&
6017 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6018 u32 bmcr;
6020 tg3_readphy(tp, MII_BMCR, &bmcr);
6021 if (bmcr & BMCR_ANENABLE) {
6022 u32 phy1, phy2;
6024 /* Select shadow register 0x1f */
6025 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6026 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6028 /* Select expansion interrupt status register */
6029 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6030 MII_TG3_DSP_EXP1_INT_STAT);
6031 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6032 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6034 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6035 /* We have signal detect and not receiving
6036 * config code words, link is up by parallel
6037 * detection.
6038 */
6040 bmcr &= ~BMCR_ANENABLE;
6041 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6042 tg3_writephy(tp, MII_BMCR, bmcr);
6043 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6046 } else if (tp->link_up &&
6047 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6048 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6049 u32 phy2;
6051 /* Select expansion interrupt status register */
6052 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6053 MII_TG3_DSP_EXP1_INT_STAT);
6054 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6055 if (phy2 & 0x20) {
6056 u32 bmcr;
6058 /* Config code words received, turn on autoneg. */
6059 tg3_readphy(tp, MII_BMCR, &bmcr);
6060 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6062 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
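/* Top-level link setup: dispatch to the fiber, fiber-MII or copper
 * handler, then retune the MAC clock prescaler, TX IPG/slot time,
 * statistics coalescing and ASPM threshold for the resulting link.
 */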
6068 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6070 u32 val;
6071 int err;
6073 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6074 err = tg3_setup_fiber_phy(tp, force_reset);
6075 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6076 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6077 else
6078 err = tg3_setup_copper_phy(tp, force_reset);
6080 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6081 u32 scale;
6083 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6084 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6085 scale = 65;
6086 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6087 scale = 6;
6088 else
6089 scale = 12;
6091 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6092 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6093 tw32(GRC_MISC_CFG, val);
6096 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6097 (6 << TX_LENGTHS_IPG_SHIFT);
6098 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6099 tg3_asic_rev(tp) == ASIC_REV_5762)
6100 val |= tr32(MAC_TX_LENGTHS) &
6101 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6102 TX_LENGTHS_CNT_DWN_VAL_MSK);
6104 if (tp->link_config.active_speed == SPEED_1000 &&
6105 tp->link_config.active_duplex == DUPLEX_HALF)
6106 tw32(MAC_TX_LENGTHS, val |
6107 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6108 else
6109 tw32(MAC_TX_LENGTHS, val |
6110 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6112 if (!tg3_flag(tp, 5705_PLUS)) {
6113 if (tp->link_up) {
6114 tw32(HOSTCC_STAT_COAL_TICKS,
6115 tp->coal.stats_block_coalesce_usecs);
6116 } else {
6117 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6121 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6122 val = tr32(PCIE_PWR_MGMT_THRESH);
6123 if (!tp->link_up)
6124 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6125 tp->pwrmgmt_thresh;
6126 else
6127 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6128 tw32(PCIE_PWR_MGMT_THRESH, val);
6131 return err;
6134 /* tp->lock must be held */
6135 static u64 tg3_refclk_read(struct tg3 *tp)
6137 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6138 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6141 /* tp->lock must be held */
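/* Load a new 64-bit count into the EAV reference clock: stop the
 * counter, write the low and high halves, then resume counting.
 */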
6142 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6147 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6148 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6149 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6152 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6153 static inline void tg3_full_unlock(struct tg3 *tp);
6154 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 struct tg3 *tp = netdev_priv(dev);
6158 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6159 SOF_TIMESTAMPING_RX_SOFTWARE |
6160 SOF_TIMESTAMPING_SOFTWARE;
6162 if (tg3_flag(tp, PTP_CAPABLE)) {
6163 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6164 SOF_TIMESTAMPING_RX_HARDWARE |
6165 SOF_TIMESTAMPING_RAW_HARDWARE;
6168 if (tp->ptp_clock)
6169 info->phc_index = ptp_clock_index(tp->ptp_clock);
6170 else
6171 info->phc_index = -1;
6173 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6176 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6177 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6178 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6179 return 0;
6182 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6184 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6185 bool neg_adj = false;
6186 u32 correction = 0;
6188 if (ppb < 0) {
6189 neg_adj = true;
6190 ppb = -ppb;
6193 /* Frequency adjustment is performed using hardware with a 24 bit
6194 * accumulator and a programmable correction value. On each clk, the
6195 * correction value gets added to the accumulator and when it
6196 * overflows, the time counter is incremented/decremented.
6198 * So conversion from ppb to correction value is
6199 * ppb * (1 << 24) / 1000000000
6200 */
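/* Worked example (illustrative): ppb = 1000 (i.e. 1 ppm) yields
 * correction = 1000 * (1 << 24) / 1000000000 = 16, so the 24-bit
 * accumulator overflows once every 2^24 / 16 = 2^20 clocks, i.e.
 * roughly one extra count per million clocks, matching 1 ppm.
 */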
6201 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6202 TG3_EAV_REF_CLK_CORRECT_MASK;
6204 tg3_full_lock(tp, 0);
6206 if (correction)
6207 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6208 TG3_EAV_REF_CLK_CORRECT_EN |
6209 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6210 else
6211 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6213 tg3_full_unlock(tp);
6215 return 0;
6218 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6220 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222 tg3_full_lock(tp, 0);
6223 tp->ptp_adjust += delta;
6224 tg3_full_unlock(tp);
6226 return 0;
6229 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6231 u64 ns;
6232 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6234 tg3_full_lock(tp, 0);
6235 ns = tg3_refclk_read(tp);
6236 ns += tp->ptp_adjust;
6237 tg3_full_unlock(tp);
6239 *ts = ns_to_timespec64(ns);
6241 return 0;
6244 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6245 const struct timespec64 *ts)
6247 u64 ns;
6248 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6250 ns = timespec64_to_ns(ts);
6252 tg3_full_lock(tp, 0);
6253 tg3_refclk_write(tp, ns);
6254 tp->ptp_adjust = 0;
6255 tg3_full_unlock(tp);
6257 return 0;
6260 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6261 struct ptp_clock_request *rq, int on)
6263 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6264 u32 clock_ctl;
6265 int rval = 0;
6267 switch (rq->type) {
6268 case PTP_CLK_REQ_PEROUT:
6269 if (rq->perout.index != 0)
6270 return -EINVAL;
6272 tg3_full_lock(tp, 0);
6273 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6274 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6276 if (on) {
6277 u64 nsec;
6279 nsec = rq->perout.start.sec * 1000000000ULL +
6280 rq->perout.start.nsec;
6282 if (rq->perout.period.sec || rq->perout.period.nsec) {
6283 netdev_warn(tp->dev,
6284 "Device supports only a one-shot timesync output, period must be 0\n");
6285 rval = -EINVAL;
6286 goto err_out;
6289 if (nsec & (1ULL << 63)) {
6290 netdev_warn(tp->dev,
6291 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6292 rval = -EINVAL;
6293 goto err_out;
6296 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6297 tw32(TG3_EAV_WATCHDOG0_MSB,
6298 TG3_EAV_WATCHDOG0_EN |
6299 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6301 tw32(TG3_EAV_REF_CLCK_CTL,
6302 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6303 } else {
6304 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6305 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6308 err_out:
6309 tg3_full_unlock(tp);
6310 return rval;
6312 default:
6313 break;
6316 return -EOPNOTSUPP;
6319 static const struct ptp_clock_info tg3_ptp_caps = {
6320 .owner = THIS_MODULE,
6321 .name = "tg3 clock",
6322 .max_adj = 250000000,
6323 .n_alarm = 0,
6324 .n_ext_ts = 0,
6325 .n_per_out = 1,
6326 .n_pins = 0,
6327 .pps = 0,
6328 .adjfreq = tg3_ptp_adjfreq,
6329 .adjtime = tg3_ptp_adjtime,
6330 .gettime64 = tg3_ptp_gettime,
6331 .settime64 = tg3_ptp_settime,
6332 .enable = tg3_ptp_enable,
6335 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6336 struct skb_shared_hwtstamps *timestamp)
6338 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6339 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6340 tp->ptp_adjust);
6343 /* tp->lock must be held */
6344 static void tg3_ptp_init(struct tg3 *tp)
6346 if (!tg3_flag(tp, PTP_CAPABLE))
6347 return;
6349 /* Initialize the hardware clock to the system time. */
6350 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6351 tp->ptp_adjust = 0;
6352 tp->ptp_info = tg3_ptp_caps;
6355 /* tp->lock must be held */
6356 static void tg3_ptp_resume(struct tg3 *tp)
6358 if (!tg3_flag(tp, PTP_CAPABLE))
6359 return;
6361 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6362 tp->ptp_adjust = 0;
6365 static void tg3_ptp_fini(struct tg3 *tp)
6367 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6368 return;
6370 ptp_clock_unregister(tp->ptp_clock);
6371 tp->ptp_clock = NULL;
6372 tp->ptp_adjust = 0;
6375 static inline int tg3_irq_sync(struct tg3 *tp)
6377 return tp->irq_sync;
6380 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6382 int i;
6384 dst = (u32 *)((u8 *)dst + off);
6385 for (i = 0; i < len; i += sizeof(u32))
6386 *dst++ = tr32(off + i);
6389 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6391 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6392 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6393 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6394 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6395 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6396 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6397 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6398 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6399 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6400 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6401 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6402 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6403 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6404 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6405 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6406 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6407 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6408 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6409 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6411 if (tg3_flag(tp, SUPPORT_MSIX))
6412 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6414 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6415 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6416 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6417 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6418 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6419 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6420 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6421 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6423 if (!tg3_flag(tp, 5705_PLUS)) {
6424 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6425 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6426 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6429 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6430 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6431 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6432 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6433 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6435 if (tg3_flag(tp, NVRAM))
6436 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6439 static void tg3_dump_state(struct tg3 *tp)
6441 int i;
6442 u32 *regs;
6444 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6445 if (!regs)
6446 return;
6448 if (tg3_flag(tp, PCI_EXPRESS)) {
6449 /* Read up to but not including private PCI registers */
6450 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6451 regs[i / sizeof(u32)] = tr32(i);
6452 } else
6453 tg3_dump_legacy_regs(tp, regs);
6455 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6456 if (!regs[i + 0] && !regs[i + 1] &&
6457 !regs[i + 2] && !regs[i + 3])
6458 continue;
6460 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6461 i * 4,
6462 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6465 kfree(regs);
6467 for (i = 0; i < tp->irq_cnt; i++) {
6468 struct tg3_napi *tnapi = &tp->napi[i];
6470 /* SW status block */
6471 netdev_err(tp->dev,
6472 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6474 tnapi->hw_status->status,
6475 tnapi->hw_status->status_tag,
6476 tnapi->hw_status->rx_jumbo_consumer,
6477 tnapi->hw_status->rx_consumer,
6478 tnapi->hw_status->rx_mini_consumer,
6479 tnapi->hw_status->idx[0].rx_producer,
6480 tnapi->hw_status->idx[0].tx_consumer);
6482 netdev_err(tp->dev,
6483 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6485 tnapi->last_tag, tnapi->last_irq_tag,
6486 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6487 tnapi->rx_rcb_ptr,
6488 tnapi->prodring.rx_std_prod_idx,
6489 tnapi->prodring.rx_std_cons_idx,
6490 tnapi->prodring.rx_jmb_prod_idx,
6491 tnapi->prodring.rx_jmb_cons_idx);
6495 /* This is called whenever we suspect that the system chipset is re-
6496 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6497 * is bogus tx completions. We try to recover by setting the
6498 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6499 * in the workqueue.
6500 */
6501 static void tg3_tx_recover(struct tg3 *tp)
6503 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6504 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6506 netdev_warn(tp->dev,
6507 "The system may be re-ordering memory-mapped I/O "
6508 "cycles to the network device, attempting to recover. "
6509 "Please report the problem to the driver maintainer "
6510 "and include system chipset information.\n");
6512 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6515 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6517 /* Tell compiler to fetch tx indices from memory. */
6518 barrier();
6519 return tnapi->tx_pending -
6520 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6521 }
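/* Worked example of the ring arithmetic above, assuming a 512-entry ring
 * (TG3_TX_RING_SIZE == 512) and tx_pending == 511: with tx_prod == 5 and
 * tx_cons == 510 the producer has wrapped, and (5 - 510) & 511 == 7
 * descriptors are in flight, leaving 511 - 7 == 504 available. The mask
 * makes the subtraction wrap-safe without any conditional.
 */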
6523 /* Tigon3 never reports partial packet sends. So we do not
6524 * need special logic to handle SKBs that have not had all
6525 * of their frags sent yet, like SunGEM does.
6526 */
6527 static void tg3_tx(struct tg3_napi *tnapi)
6529 struct tg3 *tp = tnapi->tp;
6530 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6531 u32 sw_idx = tnapi->tx_cons;
6532 struct netdev_queue *txq;
6533 int index = tnapi - tp->napi;
6534 unsigned int pkts_compl = 0, bytes_compl = 0;
6536 if (tg3_flag(tp, ENABLE_TSS))
6537 index--;
6539 txq = netdev_get_tx_queue(tp->dev, index);
6541 while (sw_idx != hw_idx) {
6542 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6543 struct sk_buff *skb = ri->skb;
6544 int i, tx_bug = 0;
6546 if (unlikely(skb == NULL)) {
6547 tg3_tx_recover(tp);
6548 return;
6551 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6552 struct skb_shared_hwtstamps timestamp;
6553 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6554 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6556 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6558 skb_tstamp_tx(skb, &timestamp);
6561 pci_unmap_single(tp->pdev,
6562 dma_unmap_addr(ri, mapping),
6563 skb_headlen(skb),
6564 PCI_DMA_TODEVICE);
6566 ri->skb = NULL;
6568 while (ri->fragmented) {
6569 ri->fragmented = false;
6570 sw_idx = NEXT_TX(sw_idx);
6571 ri = &tnapi->tx_buffers[sw_idx];
6574 sw_idx = NEXT_TX(sw_idx);
6576 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6577 ri = &tnapi->tx_buffers[sw_idx];
6578 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6579 tx_bug = 1;
6581 pci_unmap_page(tp->pdev,
6582 dma_unmap_addr(ri, mapping),
6583 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6584 PCI_DMA_TODEVICE);
6586 while (ri->fragmented) {
6587 ri->fragmented = false;
6588 sw_idx = NEXT_TX(sw_idx);
6589 ri = &tnapi->tx_buffers[sw_idx];
6592 sw_idx = NEXT_TX(sw_idx);
6595 pkts_compl++;
6596 bytes_compl += skb->len;
6598 dev_consume_skb_any(skb);
6600 if (unlikely(tx_bug)) {
6601 tg3_tx_recover(tp);
6602 return;
6606 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6608 tnapi->tx_cons = sw_idx;
6610 /* Need to make the tx_cons update visible to tg3_start_xmit()
6611 * before checking for netif_queue_stopped(). Without the
6612 * memory barrier, there is a small possibility that tg3_start_xmit()
6613 * will miss it and cause the queue to be stopped forever.
6614 */
6615 smp_mb();
6617 if (unlikely(netif_tx_queue_stopped(txq) &&
6618 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6619 __netif_tx_lock(txq, smp_processor_id());
6620 if (netif_tx_queue_stopped(txq) &&
6621 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6622 netif_tx_wake_queue(txq);
6623 __netif_tx_unlock(txq);
6627 static void tg3_frag_free(bool is_frag, void *data)
6629 if (is_frag)
6630 skb_free_frag(data);
6631 else
6632 kfree(data);
6635 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6637 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6638 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6640 if (!ri->data)
6641 return;
6643 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6644 map_sz, PCI_DMA_FROMDEVICE);
6645 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6646 ri->data = NULL;
6650 /* Returns size of skb allocated or < 0 on error.
6651 *
6652 * We only need to fill in the address because the other members
6653 * of the RX descriptor are invariant, see tg3_init_rings.
6654 *
6655 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6656 * posting buffers we only dirty the first cache line of the RX
6657 * descriptor (containing the address). Whereas for the RX status
6658 * buffers the cpu only reads the last cacheline of the RX descriptor
6659 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6660 */
6661 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6662 u32 opaque_key, u32 dest_idx_unmasked,
6663 unsigned int *frag_size)
6665 struct tg3_rx_buffer_desc *desc;
6666 struct ring_info *map;
6667 u8 *data;
6668 dma_addr_t mapping;
6669 int skb_size, data_size, dest_idx;
6671 switch (opaque_key) {
6672 case RXD_OPAQUE_RING_STD:
6673 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6674 desc = &tpr->rx_std[dest_idx];
6675 map = &tpr->rx_std_buffers[dest_idx];
6676 data_size = tp->rx_pkt_map_sz;
6677 break;
6679 case RXD_OPAQUE_RING_JUMBO:
6680 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6681 desc = &tpr->rx_jmb[dest_idx].std;
6682 map = &tpr->rx_jmb_buffers[dest_idx];
6683 data_size = TG3_RX_JMB_MAP_SZ;
6684 break;
6686 default:
6687 return -EINVAL;
6690 /* Do not overwrite any of the map or rp information
6691 * until we are sure we can commit to a new buffer.
6692 *
6693 * Callers depend upon this behavior and assume that
6694 * we leave everything unchanged if we fail.
6695 */
6696 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6697 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6698 if (skb_size <= PAGE_SIZE) {
6699 data = netdev_alloc_frag(skb_size);
6700 *frag_size = skb_size;
6701 } else {
6702 data = kmalloc(skb_size, GFP_ATOMIC);
6703 *frag_size = 0;
6705 if (!data)
6706 return -ENOMEM;
6708 mapping = pci_map_single(tp->pdev,
6709 data + TG3_RX_OFFSET(tp),
6710 data_size,
6711 PCI_DMA_FROMDEVICE);
6712 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6713 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6714 return -EIO;
6717 map->data = data;
6718 dma_unmap_addr_set(map, mapping, mapping);
6720 desc->addr_hi = ((u64)mapping >> 32);
6721 desc->addr_lo = ((u64)mapping & 0xffffffff);
6723 return data_size;
6724 }
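/* A buffer posted here is later wrapped without copying in tg3_rx(); a
 * condensed sketch of that pairing (see tg3_rx() below for the full
 * recycle and error handling):
 *
 *	skb = build_skb(data, frag_size);	// frag_size == 0: kmalloc'd
 *	if (!skb)
 *		tg3_frag_free(frag_size != 0, data);
 *	else
 *		skb_reserve(skb, TG3_RX_OFFSET(tp));
 *
 * The *frag_size out-parameter records which allocator produced the
 * buffer so the matching free path can be chosen later.
 */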
6726 /* We only need to move over in the address because the other
6727 * members of the RX descriptor are invariant. See notes above
6728 * tg3_alloc_rx_data for full details.
6729 */
6730 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6731 struct tg3_rx_prodring_set *dpr,
6732 u32 opaque_key, int src_idx,
6733 u32 dest_idx_unmasked)
6735 struct tg3 *tp = tnapi->tp;
6736 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6737 struct ring_info *src_map, *dest_map;
6738 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6739 int dest_idx;
6741 switch (opaque_key) {
6742 case RXD_OPAQUE_RING_STD:
6743 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6744 dest_desc = &dpr->rx_std[dest_idx];
6745 dest_map = &dpr->rx_std_buffers[dest_idx];
6746 src_desc = &spr->rx_std[src_idx];
6747 src_map = &spr->rx_std_buffers[src_idx];
6748 break;
6750 case RXD_OPAQUE_RING_JUMBO:
6751 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6752 dest_desc = &dpr->rx_jmb[dest_idx].std;
6753 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6754 src_desc = &spr->rx_jmb[src_idx].std;
6755 src_map = &spr->rx_jmb_buffers[src_idx];
6756 break;
6758 default:
6759 return;
6762 dest_map->data = src_map->data;
6763 dma_unmap_addr_set(dest_map, mapping,
6764 dma_unmap_addr(src_map, mapping));
6765 dest_desc->addr_hi = src_desc->addr_hi;
6766 dest_desc->addr_lo = src_desc->addr_lo;
6768 /* Ensure that the update to the skb happens after the physical
6769 * addresses have been transferred to the new BD location.
6770 */
6771 smp_wmb();
6773 src_map->data = NULL;
6776 /* The RX ring scheme is composed of multiple rings which post fresh
6777 * buffers to the chip, and one special ring the chip uses to report
6778 * status back to the host.
6779 *
6780 * The special ring reports the status of received packets to the
6781 * host. The chip does not write into the original descriptor the
6782 * RX buffer was obtained from. The chip simply takes the original
6783 * descriptor as provided by the host, updates the status and length
6784 * field, then writes this into the next status ring entry.
6785 *
6786 * Each ring the host uses to post buffers to the chip is described
6787 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6788 * it is first placed into the on-chip ram. When the packet's length
6789 * is known, it walks down the TG3_BDINFO entries to select the ring.
6790 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6791 * which is within the range of the new packet's length is chosen.
6792 *
6793 * The "separate ring for rx status" scheme may sound queer, but it makes
6794 * sense from a cache coherency perspective. If only the host writes
6795 * to the buffer post rings, and only the chip writes to the rx status
6796 * rings, then cache lines never move beyond shared-modified state.
6797 * If both the host and chip were to write into the same ring, cache line
6798 * eviction could occur since both entities want it in an exclusive state.
6799 */
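/* The MAXLEN walk described above amounts to the following (illustrative
 * pseudo-C only; the selection is performed by the chip, not by driver
 * code):
 *
 *	for (each TG3_BDINFO ring, smallest MAXLEN first)
 *		if (pkt_len <= ring->maxlen)
 *			use this ring;	// e.g. standard vs. jumbo
 */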
6800 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6802 struct tg3 *tp = tnapi->tp;
6803 u32 work_mask, rx_std_posted = 0;
6804 u32 std_prod_idx, jmb_prod_idx;
6805 u32 sw_idx = tnapi->rx_rcb_ptr;
6806 u16 hw_idx;
6807 int received;
6808 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6810 hw_idx = *(tnapi->rx_rcb_prod_idx);
6811 /*
6812 * We need to order the read of hw_idx and the read of
6813 * the opaque cookie.
6814 */
6815 rmb();
6816 work_mask = 0;
6817 received = 0;
6818 std_prod_idx = tpr->rx_std_prod_idx;
6819 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6820 while (sw_idx != hw_idx && budget > 0) {
6821 struct ring_info *ri;
6822 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6823 unsigned int len;
6824 struct sk_buff *skb;
6825 dma_addr_t dma_addr;
6826 u32 opaque_key, desc_idx, *post_ptr;
6827 u8 *data;
6828 u64 tstamp = 0;
6830 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6831 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6832 if (opaque_key == RXD_OPAQUE_RING_STD) {
6833 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6834 dma_addr = dma_unmap_addr(ri, mapping);
6835 data = ri->data;
6836 post_ptr = &std_prod_idx;
6837 rx_std_posted++;
6838 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6839 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6840 dma_addr = dma_unmap_addr(ri, mapping);
6841 data = ri->data;
6842 post_ptr = &jmb_prod_idx;
6843 } else
6844 goto next_pkt_nopost;
6846 work_mask |= opaque_key;
6848 if (desc->err_vlan & RXD_ERR_MASK) {
6849 drop_it:
6850 tg3_recycle_rx(tnapi, tpr, opaque_key,
6851 desc_idx, *post_ptr);
6852 drop_it_no_recycle:
6853 /* Other statistics kept track of by card. */
6854 tp->rx_dropped++;
6855 goto next_pkt;
6858 prefetch(data + TG3_RX_OFFSET(tp));
6859 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6860 ETH_FCS_LEN;
6862 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6863 RXD_FLAG_PTPSTAT_PTPV1 ||
6864 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6865 RXD_FLAG_PTPSTAT_PTPV2) {
6866 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6867 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6870 if (len > TG3_RX_COPY_THRESH(tp)) {
6871 int skb_size;
6872 unsigned int frag_size;
6874 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6875 *post_ptr, &frag_size);
6876 if (skb_size < 0)
6877 goto drop_it;
6879 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6880 PCI_DMA_FROMDEVICE);
6882 /* Ensure that the update to the data happens
6883 * after the usage of the old DMA mapping.
6884 */
6885 smp_wmb();
6887 ri->data = NULL;
6889 skb = build_skb(data, frag_size);
6890 if (!skb) {
6891 tg3_frag_free(frag_size != 0, data);
6892 goto drop_it_no_recycle;
6894 skb_reserve(skb, TG3_RX_OFFSET(tp));
6895 } else {
6896 tg3_recycle_rx(tnapi, tpr, opaque_key,
6897 desc_idx, *post_ptr);
6899 skb = netdev_alloc_skb(tp->dev,
6900 len + TG3_RAW_IP_ALIGN);
6901 if (skb == NULL)
6902 goto drop_it_no_recycle;
6904 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6905 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6906 memcpy(skb->data,
6907 data + TG3_RX_OFFSET(tp),
6908 len);
6909 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6912 skb_put(skb, len);
6913 if (tstamp)
6914 tg3_hwclock_to_timestamp(tp, tstamp,
6915 skb_hwtstamps(skb));
6917 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6918 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6919 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6920 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6921 skb->ip_summed = CHECKSUM_UNNECESSARY;
6922 else
6923 skb_checksum_none_assert(skb);
6925 skb->protocol = eth_type_trans(skb, tp->dev);
6927 if (len > (tp->dev->mtu + ETH_HLEN) &&
6928 skb->protocol != htons(ETH_P_8021Q) &&
6929 skb->protocol != htons(ETH_P_8021AD)) {
6930 dev_kfree_skb_any(skb);
6931 goto drop_it_no_recycle;
6934 if (desc->type_flags & RXD_FLAG_VLAN &&
6935 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6936 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6937 desc->err_vlan & RXD_VLAN_MASK);
6939 napi_gro_receive(&tnapi->napi, skb);
6941 received++;
6942 budget--;
6944 next_pkt:
6945 (*post_ptr)++;
6947 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6948 tpr->rx_std_prod_idx = std_prod_idx &
6949 tp->rx_std_ring_mask;
6950 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6951 tpr->rx_std_prod_idx);
6952 work_mask &= ~RXD_OPAQUE_RING_STD;
6953 rx_std_posted = 0;
6955 next_pkt_nopost:
6956 sw_idx++;
6957 sw_idx &= tp->rx_ret_ring_mask;
6959 /* Refresh hw_idx to see if there is new work */
6960 if (sw_idx == hw_idx) {
6961 hw_idx = *(tnapi->rx_rcb_prod_idx);
6962 rmb();
6966 /* ACK the status ring. */
6967 tnapi->rx_rcb_ptr = sw_idx;
6968 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6970 /* Refill RX ring(s). */
6971 if (!tg3_flag(tp, ENABLE_RSS)) {
6972 /* Sync BD data before updating mailbox */
6973 wmb();
6975 if (work_mask & RXD_OPAQUE_RING_STD) {
6976 tpr->rx_std_prod_idx = std_prod_idx &
6977 tp->rx_std_ring_mask;
6978 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6979 tpr->rx_std_prod_idx);
6981 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6982 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6983 tp->rx_jmb_ring_mask;
6984 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6985 tpr->rx_jmb_prod_idx);
6987 mmiowb();
6988 } else if (work_mask) {
6989 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6990 * updated before the producer indices can be updated.
6991 */
6992 smp_wmb();
6994 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6995 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6997 if (tnapi != &tp->napi[1]) {
6998 tp->rx_refill = true;
6999 napi_schedule(&tp->napi[1].napi);
7003 return received;
7006 static void tg3_poll_link(struct tg3 *tp)
7008 /* handle link change and other phy events */
7009 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7010 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7012 if (sblk->status & SD_STATUS_LINK_CHG) {
7013 sblk->status = SD_STATUS_UPDATED |
7014 (sblk->status & ~SD_STATUS_LINK_CHG);
7015 spin_lock(&tp->lock);
7016 if (tg3_flag(tp, USE_PHYLIB)) {
7017 tw32_f(MAC_STATUS,
7018 (MAC_STATUS_SYNC_CHANGED |
7019 MAC_STATUS_CFG_CHANGED |
7020 MAC_STATUS_MI_COMPLETION |
7021 MAC_STATUS_LNKSTATE_CHANGED));
7022 udelay(40);
7023 } else
7024 tg3_setup_phy(tp, false);
7025 spin_unlock(&tp->lock);
7030 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7031 struct tg3_rx_prodring_set *dpr,
7032 struct tg3_rx_prodring_set *spr)
7034 u32 si, di, cpycnt, src_prod_idx;
7035 int i, err = 0;
7037 while (1) {
7038 src_prod_idx = spr->rx_std_prod_idx;
7040 /* Make sure updates to the rx_std_buffers[] entries and the
7041 * standard producer index are seen in the correct order.
7042 */
7043 smp_rmb();
7045 if (spr->rx_std_cons_idx == src_prod_idx)
7046 break;
7048 if (spr->rx_std_cons_idx < src_prod_idx)
7049 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7050 else
7051 cpycnt = tp->rx_std_ring_mask + 1 -
7052 spr->rx_std_cons_idx;
7054 cpycnt = min(cpycnt,
7055 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7057 si = spr->rx_std_cons_idx;
7058 di = dpr->rx_std_prod_idx;
7060 for (i = di; i < di + cpycnt; i++) {
7061 if (dpr->rx_std_buffers[i].data) {
7062 cpycnt = i - di;
7063 err = -ENOSPC;
7064 break;
7068 if (!cpycnt)
7069 break;
7071 /* Ensure that updates to the rx_std_buffers ring and the
7072 * shadowed hardware producer ring from tg3_recycle_skb() are
7073 * ordered correctly WRT the skb check above.
7074 */
7075 smp_rmb();
7077 memcpy(&dpr->rx_std_buffers[di],
7078 &spr->rx_std_buffers[si],
7079 cpycnt * sizeof(struct ring_info));
7081 for (i = 0; i < cpycnt; i++, di++, si++) {
7082 struct tg3_rx_buffer_desc *sbd, *dbd;
7083 sbd = &spr->rx_std[si];
7084 dbd = &dpr->rx_std[di];
7085 dbd->addr_hi = sbd->addr_hi;
7086 dbd->addr_lo = sbd->addr_lo;
7089 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7090 tp->rx_std_ring_mask;
7091 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7092 tp->rx_std_ring_mask;
7095 while (1) {
7096 src_prod_idx = spr->rx_jmb_prod_idx;
7098 /* Make sure updates to the rx_jmb_buffers[] entries and
7099 * the jumbo producer index are seen in the correct order.
7100 */
7101 smp_rmb();
7103 if (spr->rx_jmb_cons_idx == src_prod_idx)
7104 break;
7106 if (spr->rx_jmb_cons_idx < src_prod_idx)
7107 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7108 else
7109 cpycnt = tp->rx_jmb_ring_mask + 1 -
7110 spr->rx_jmb_cons_idx;
7112 cpycnt = min(cpycnt,
7113 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7115 si = spr->rx_jmb_cons_idx;
7116 di = dpr->rx_jmb_prod_idx;
7118 for (i = di; i < di + cpycnt; i++) {
7119 if (dpr->rx_jmb_buffers[i].data) {
7120 cpycnt = i - di;
7121 err = -ENOSPC;
7122 break;
7126 if (!cpycnt)
7127 break;
7129 /* Ensure that updates to the rx_jmb_buffers ring and the
7130 * shadowed hardware producer ring from tg3_recycle_skb() are
7131 * ordered correctly WRT the skb check above.
7132 */
7133 smp_rmb();
7135 memcpy(&dpr->rx_jmb_buffers[di],
7136 &spr->rx_jmb_buffers[si],
7137 cpycnt * sizeof(struct ring_info));
7139 for (i = 0; i < cpycnt; i++, di++, si++) {
7140 struct tg3_rx_buffer_desc *sbd, *dbd;
7141 sbd = &spr->rx_jmb[si].std;
7142 dbd = &dpr->rx_jmb[di].std;
7143 dbd->addr_hi = sbd->addr_hi;
7144 dbd->addr_lo = sbd->addr_lo;
7147 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7148 tp->rx_jmb_ring_mask;
7149 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7150 tp->rx_jmb_ring_mask;
7153 return err;
7154 }
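/* Worked example of the cpycnt clamping above, assuming a 512-entry
 * standard ring (rx_std_ring_mask == 511): with rx_std_cons_idx == 500
 * and src_prod_idx == 10 the source range wraps, so one pass copies
 * 512 - 500 == 12 entries up to the end of the ring and the next loop
 * iteration handles entries 0..9. Each pass is additionally clamped by
 * the space in front of the destination producer index and by any
 * still-occupied destination slots (the -ENOSPC case).
 */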
7156 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7158 struct tg3 *tp = tnapi->tp;
7160 /* run TX completion thread */
7161 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7162 tg3_tx(tnapi);
7163 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7164 return work_done;
7167 if (!tnapi->rx_rcb_prod_idx)
7168 return work_done;
7170 /* run RX thread, within the bounds set by NAPI.
7171 * All RX "locking" is done by ensuring outside
7172 * code synchronizes with tg3->napi.poll()
7173 */
7174 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7175 work_done += tg3_rx(tnapi, budget - work_done);
7177 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7178 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7179 int i, err = 0;
7180 u32 std_prod_idx = dpr->rx_std_prod_idx;
7181 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7183 tp->rx_refill = false;
7184 for (i = 1; i <= tp->rxq_cnt; i++)
7185 err |= tg3_rx_prodring_xfer(tp, dpr,
7186 &tp->napi[i].prodring);
7188 wmb();
7190 if (std_prod_idx != dpr->rx_std_prod_idx)
7191 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7192 dpr->rx_std_prod_idx);
7194 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7195 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7196 dpr->rx_jmb_prod_idx);
7198 mmiowb();
7200 if (err)
7201 tw32_f(HOSTCC_MODE, tp->coal_now);
7204 return work_done;
7207 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7209 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7210 schedule_work(&tp->reset_task);
7213 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7215 cancel_work_sync(&tp->reset_task);
7216 tg3_flag_clear(tp, RESET_TASK_PENDING);
7217 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7220 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7222 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7223 struct tg3 *tp = tnapi->tp;
7224 int work_done = 0;
7225 struct tg3_hw_status *sblk = tnapi->hw_status;
7227 while (1) {
7228 work_done = tg3_poll_work(tnapi, work_done, budget);
7230 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7231 goto tx_recovery;
7233 if (unlikely(work_done >= budget))
7234 break;
7236 /* tp->last_tag is used in tg3_int_reenable() below
7237 * to tell the hw how much work has been processed,
7238 * so we must read it before checking for more work.
7239 */
7240 tnapi->last_tag = sblk->status_tag;
7241 tnapi->last_irq_tag = tnapi->last_tag;
7242 rmb();
7244 /* check for RX/TX work to do */
7245 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7246 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7248 /* This test here is not race free, but will reduce
7249 * the number of interrupts by looping again.
7250 */
7251 if (tnapi == &tp->napi[1] && tp->rx_refill)
7252 continue;
7254 napi_complete_done(napi, work_done);
7255 /* Reenable interrupts. */
7256 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7258 /* This test here is synchronized by napi_schedule()
7259 * and napi_complete() to close the race condition.
7260 */
7261 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7262 tw32(HOSTCC_MODE, tp->coalesce_mode |
7263 HOSTCC_MODE_ENABLE |
7264 tnapi->coal_now);
7266 mmiowb();
7267 break;
7271 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7272 return work_done;
7274 tx_recovery:
7275 /* work_done is guaranteed to be less than budget. */
7276 napi_complete(napi);
7277 tg3_reset_task_schedule(tp);
7278 return work_done;
7281 static void tg3_process_error(struct tg3 *tp)
7283 u32 val;
7284 bool real_error = false;
7286 if (tg3_flag(tp, ERROR_PROCESSED))
7287 return;
7289 /* Check Flow Attention register */
7290 val = tr32(HOSTCC_FLOW_ATTN);
7291 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7292 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7293 real_error = true;
7296 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7297 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7298 real_error = true;
7301 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7302 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7303 real_error = true;
7306 if (!real_error)
7307 return;
7309 tg3_dump_state(tp);
7311 tg3_flag_set(tp, ERROR_PROCESSED);
7312 tg3_reset_task_schedule(tp);
7315 static int tg3_poll(struct napi_struct *napi, int budget)
7317 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7318 struct tg3 *tp = tnapi->tp;
7319 int work_done = 0;
7320 struct tg3_hw_status *sblk = tnapi->hw_status;
7322 while (1) {
7323 if (sblk->status & SD_STATUS_ERROR)
7324 tg3_process_error(tp);
7326 tg3_poll_link(tp);
7328 work_done = tg3_poll_work(tnapi, work_done, budget);
7330 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7331 goto tx_recovery;
7333 if (unlikely(work_done >= budget))
7334 break;
7336 if (tg3_flag(tp, TAGGED_STATUS)) {
7337 /* tp->last_tag is used in tg3_int_reenable() below
7338 * to tell the hw how much work has been processed,
7339 * so we must read it before checking for more work.
7340 */
7341 tnapi->last_tag = sblk->status_tag;
7342 tnapi->last_irq_tag = tnapi->last_tag;
7343 rmb();
7344 } else
7345 sblk->status &= ~SD_STATUS_UPDATED;
7347 if (likely(!tg3_has_work(tnapi))) {
7348 napi_complete_done(napi, work_done);
7349 tg3_int_reenable(tnapi);
7350 break;
7354 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7355 return work_done;
7357 tx_recovery:
7358 /* work_done is guaranteed to be less than budget. */
7359 napi_complete(napi);
7360 tg3_reset_task_schedule(tp);
7361 return work_done;
7364 static void tg3_napi_disable(struct tg3 *tp)
7366 int i;
7368 for (i = tp->irq_cnt - 1; i >= 0; i--)
7369 napi_disable(&tp->napi[i].napi);
7372 static void tg3_napi_enable(struct tg3 *tp)
7374 int i;
7376 for (i = 0; i < tp->irq_cnt; i++)
7377 napi_enable(&tp->napi[i].napi);
7380 static void tg3_napi_init(struct tg3 *tp)
7382 int i;
7384 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7385 for (i = 1; i < tp->irq_cnt; i++)
7386 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7389 static void tg3_napi_fini(struct tg3 *tp)
7391 int i;
7393 for (i = 0; i < tp->irq_cnt; i++)
7394 netif_napi_del(&tp->napi[i].napi);
7397 static inline void tg3_netif_stop(struct tg3 *tp)
7399 netif_trans_update(tp->dev); /* prevent tx timeout */
7400 tg3_napi_disable(tp);
7401 netif_carrier_off(tp->dev);
7402 netif_tx_disable(tp->dev);
7405 /* tp->lock must be held */
7406 static inline void tg3_netif_start(struct tg3 *tp)
7408 tg3_ptp_resume(tp);
7410 /* NOTE: unconditional netif_tx_wake_all_queues is only
7411 * appropriate so long as all callers are assured to
7412 * have free tx slots (such as after tg3_init_hw)
7413 */
7414 netif_tx_wake_all_queues(tp->dev);
7416 if (tp->link_up)
7417 netif_carrier_on(tp->dev);
7419 tg3_napi_enable(tp);
7420 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7421 tg3_enable_ints(tp);
7424 static void tg3_irq_quiesce(struct tg3 *tp)
7425 __releases(tp->lock)
7426 __acquires(tp->lock)
7428 int i;
7430 BUG_ON(tp->irq_sync);
7432 tp->irq_sync = 1;
7433 smp_mb();
7435 spin_unlock_bh(&tp->lock);
7437 for (i = 0; i < tp->irq_cnt; i++)
7438 synchronize_irq(tp->napi[i].irq_vec);
7440 spin_lock_bh(&tp->lock);
7443 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7444 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7445 * with as well. Most of the time, this is not necessary except when
7446 * shutting down the device.
7447 */
7448 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7450 spin_lock_bh(&tp->lock);
7451 if (irq_sync)
7452 tg3_irq_quiesce(tp);
7455 static inline void tg3_full_unlock(struct tg3 *tp)
7457 spin_unlock_bh(&tp->lock);
7458 }
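/* A typical caller pattern for the pair above (sketch only): pass
 * irq_sync == 1 when the hardware is about to be reconfigured or reset,
 * so that in-flight interrupt handlers are drained first.
 *
 *	tg3_full_lock(tp, 1);		// quiesce IRQs before touching hw
 *	...stop, reprogram or reset the chip...
 *	tg3_full_unlock(tp);
 *
 * Fast paths such as the PTP callbacks earlier in this file use
 * tg3_full_lock(tp, 0), taking only the spinlock.
 */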
7460 /* One-shot MSI handler - Chip automatically disables interrupt
7461 * after sending MSI so driver doesn't have to do it.
7462 */
7463 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7465 struct tg3_napi *tnapi = dev_id;
7466 struct tg3 *tp = tnapi->tp;
7468 prefetch(tnapi->hw_status);
7469 if (tnapi->rx_rcb)
7470 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7472 if (likely(!tg3_irq_sync(tp)))
7473 napi_schedule(&tnapi->napi);
7475 return IRQ_HANDLED;
7478 /* MSI ISR - No need to check for interrupt sharing and no need to
7479 * flush status block and interrupt mailbox. PCI ordering rules
7480 * guarantee that MSI will arrive after the status block.
7481 */
7482 static irqreturn_t tg3_msi(int irq, void *dev_id)
7484 struct tg3_napi *tnapi = dev_id;
7485 struct tg3 *tp = tnapi->tp;
7487 prefetch(tnapi->hw_status);
7488 if (tnapi->rx_rcb)
7489 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7490 /*
7491 * Writing any value to intr-mbox-0 clears PCI INTA# and
7492 * chip-internal interrupt pending events.
7493 * Writing non-zero to intr-mbox-0 additionally tells the
7494 * NIC to stop sending us irqs, engaging "in-intr-handler"
7495 * event coalescing.
7496 */
7497 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7498 if (likely(!tg3_irq_sync(tp)))
7499 napi_schedule(&tnapi->napi);
7501 return IRQ_RETVAL(1);
7504 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7506 struct tg3_napi *tnapi = dev_id;
7507 struct tg3 *tp = tnapi->tp;
7508 struct tg3_hw_status *sblk = tnapi->hw_status;
7509 unsigned int handled = 1;
7511 /* In INTx mode, it is possible for the interrupt to arrive at
7512 * the CPU before the status block posted prior to the interrupt.
7513 * Reading the PCI State register will confirm whether the
7514 * interrupt is ours and will flush the status block.
7515 */
7516 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7517 if (tg3_flag(tp, CHIP_RESETTING) ||
7518 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7519 handled = 0;
7520 goto out;
7521 }
7522 }
7524 /*
7525 * Writing any value to intr-mbox-0 clears PCI INTA# and
7526 * chip-internal interrupt pending events.
7527 * Writing non-zero to intr-mbox-0 additionally tells the
7528 * NIC to stop sending us irqs, engaging "in-intr-handler"
7529 * event coalescing.
7530 *
7531 * Flush the mailbox to de-assert the IRQ immediately to prevent
7532 * spurious interrupts. The flush impacts performance but
7533 * excessive spurious interrupts can be worse in some cases.
7534 */
7535 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7536 if (tg3_irq_sync(tp))
7537 goto out;
7538 sblk->status &= ~SD_STATUS_UPDATED;
7539 if (likely(tg3_has_work(tnapi))) {
7540 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7541 napi_schedule(&tnapi->napi);
7542 } else {
7543 /* No work, shared interrupt perhaps? re-enable
7544 * interrupts, and flush that PCI write
7545 */
7546 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7547 0x00000000);
7549 out:
7550 return IRQ_RETVAL(handled);
7553 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7555 struct tg3_napi *tnapi = dev_id;
7556 struct tg3 *tp = tnapi->tp;
7557 struct tg3_hw_status *sblk = tnapi->hw_status;
7558 unsigned int handled = 1;
7560 /* In INTx mode, it is possible for the interrupt to arrive at
7561 * the CPU before the status block posted prior to the interrupt.
7562 * Reading the PCI State register will confirm whether the
7563 * interrupt is ours and will flush the status block.
7564 */
7565 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7566 if (tg3_flag(tp, CHIP_RESETTING) ||
7567 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7568 handled = 0;
7569 goto out;
7570 }
7571 }
7573 /*
7574 * Writing any value to intr-mbox-0 clears PCI INTA# and
7575 * chip-internal interrupt pending events.
7576 * Writing non-zero to intr-mbox-0 additionally tells the
7577 * NIC to stop sending us irqs, engaging "in-intr-handler"
7578 * event coalescing.
7579 *
7580 * Flush the mailbox to de-assert the IRQ immediately to prevent
7581 * spurious interrupts. The flush impacts performance but
7582 * excessive spurious interrupts can be worse in some cases.
7583 */
7584 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7586 /*
7587 * In a shared interrupt configuration, sometimes other devices'
7588 * interrupts will scream. We record the current status tag here
7589 * so that the above check can report that the screaming interrupts
7590 * are unhandled. Eventually they will be silenced.
7591 */
7592 tnapi->last_irq_tag = sblk->status_tag;
7594 if (tg3_irq_sync(tp))
7595 goto out;
7597 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7599 napi_schedule(&tnapi->napi);
7601 out:
7602 return IRQ_RETVAL(handled);
7605 /* ISR for interrupt test */
7606 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7608 struct tg3_napi *tnapi = dev_id;
7609 struct tg3 *tp = tnapi->tp;
7610 struct tg3_hw_status *sblk = tnapi->hw_status;
7612 if ((sblk->status & SD_STATUS_UPDATED) ||
7613 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7614 tg3_disable_ints(tp);
7615 return IRQ_RETVAL(1);
7617 return IRQ_RETVAL(0);
7620 #ifdef CONFIG_NET_POLL_CONTROLLER
7621 static void tg3_poll_controller(struct net_device *dev)
7623 int i;
7624 struct tg3 *tp = netdev_priv(dev);
7626 if (tg3_irq_sync(tp))
7627 return;
7629 for (i = 0; i < tp->irq_cnt; i++)
7630 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7632 #endif
7634 static void tg3_tx_timeout(struct net_device *dev)
7636 struct tg3 *tp = netdev_priv(dev);
7638 if (netif_msg_tx_err(tp)) {
7639 netdev_err(dev, "transmit timed out, resetting\n");
7640 tg3_dump_state(tp);
7643 tg3_reset_task_schedule(tp);
7646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7649 u32 base = (u32) mapping & 0xffffffff;
7651 return base + len + 8 < base;
7652 }
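/* Worked example: base == 0xfffffff0 with len == 0x20 gives
 * 0xfffffff0 + 0x20 + 8 == 0x18 after 32-bit wraparound, which is less
 * than base, so the buffer is flagged as crossing a 4GB boundary. The
 * "+ 8" leaves a small safety margin past the end of the buffer.
 */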
7654 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7655 * of any 4GB boundaries: 4G, 8G, etc
7656 */
7657 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7658 u32 len, u32 mss)
7660 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7661 u32 base = (u32) mapping & 0xffffffff;
7663 return ((base + len + (mss & 0x3fff)) < base);
7665 return 0;
7668 /* Test for DMA addresses > 40-bit */
7669 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7670 int len)
7672 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7673 if (tg3_flag(tp, 40BIT_DMA_BUG))
7674 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7675 return 0;
7676 #else
7677 return 0;
7678 #endif
7681 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7682 dma_addr_t mapping, u32 len, u32 flags,
7683 u32 mss, u32 vlan)
7685 txbd->addr_hi = ((u64) mapping >> 32);
7686 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7687 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7688 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7691 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7692 dma_addr_t map, u32 len, u32 flags,
7693 u32 mss, u32 vlan)
7695 struct tg3 *tp = tnapi->tp;
7696 bool hwbug = false;
7698 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7699 hwbug = true;
7701 if (tg3_4g_overflow_test(map, len))
7702 hwbug = true;
7704 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7705 hwbug = true;
7707 if (tg3_40bit_overflow_test(tp, map, len))
7708 hwbug = true;
7710 if (tp->dma_limit) {
7711 u32 prvidx = *entry;
7712 u32 tmp_flag = flags & ~TXD_FLAG_END;
7713 while (len > tp->dma_limit && *budget) {
7714 u32 frag_len = tp->dma_limit;
7715 len -= tp->dma_limit;
7717 /* Avoid the 8byte DMA problem */
7718 if (len <= 8) {
7719 len += tp->dma_limit / 2;
7720 frag_len = tp->dma_limit / 2;
7723 tnapi->tx_buffers[*entry].fragmented = true;
7725 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7726 frag_len, tmp_flag, mss, vlan);
7727 *budget -= 1;
7728 prvidx = *entry;
7729 *entry = NEXT_TX(*entry);
7731 map += frag_len;
7734 if (len) {
7735 if (*budget) {
7736 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7737 len, flags, mss, vlan);
7738 *budget -= 1;
7739 *entry = NEXT_TX(*entry);
7740 } else {
7741 hwbug = true;
7742 tnapi->tx_buffers[prvidx].fragmented = false;
7745 } else {
7746 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7747 len, flags, mss, vlan);
7748 *entry = NEXT_TX(*entry);
7751 return hwbug;
7752 }
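/* Worked example of the dma_limit splitting above, assuming
 * tp->dma_limit == 4096: a 4100-byte fragment would naively split into
 * 4096 + 4, but a 4-byte tail would trip the 8-byte DMA bug, so the
 * first piece is shortened to 2048 and the final BD carries the
 * remaining 2052 bytes (2048 + 2052 == 4100).
 */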
7754 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7756 int i;
7757 struct sk_buff *skb;
7758 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7760 skb = txb->skb;
7761 txb->skb = NULL;
7763 pci_unmap_single(tnapi->tp->pdev,
7764 dma_unmap_addr(txb, mapping),
7765 skb_headlen(skb),
7766 PCI_DMA_TODEVICE);
7768 while (txb->fragmented) {
7769 txb->fragmented = false;
7770 entry = NEXT_TX(entry);
7771 txb = &tnapi->tx_buffers[entry];
7774 for (i = 0; i <= last; i++) {
7775 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7777 entry = NEXT_TX(entry);
7778 txb = &tnapi->tx_buffers[entry];
7780 pci_unmap_page(tnapi->tp->pdev,
7781 dma_unmap_addr(txb, mapping),
7782 skb_frag_size(frag), PCI_DMA_TODEVICE);
7784 while (txb->fragmented) {
7785 txb->fragmented = false;
7786 entry = NEXT_TX(entry);
7787 txb = &tnapi->tx_buffers[entry];
7792 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7793 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7794 struct sk_buff **pskb,
7795 u32 *entry, u32 *budget,
7796 u32 base_flags, u32 mss, u32 vlan)
7798 struct tg3 *tp = tnapi->tp;
7799 struct sk_buff *new_skb, *skb = *pskb;
7800 dma_addr_t new_addr = 0;
7801 int ret = 0;
7803 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7804 new_skb = skb_copy(skb, GFP_ATOMIC);
7805 else {
7806 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7808 new_skb = skb_copy_expand(skb,
7809 skb_headroom(skb) + more_headroom,
7810 skb_tailroom(skb), GFP_ATOMIC);
7813 if (!new_skb) {
7814 ret = -1;
7815 } else {
7816 /* New SKB is guaranteed to be linear. */
7817 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7818 PCI_DMA_TODEVICE);
7819 /* Make sure the mapping succeeded */
7820 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7821 dev_kfree_skb_any(new_skb);
7822 ret = -1;
7823 } else {
7824 u32 save_entry = *entry;
7826 base_flags |= TXD_FLAG_END;
7828 tnapi->tx_buffers[*entry].skb = new_skb;
7829 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7830 mapping, new_addr);
7832 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7833 new_skb->len, base_flags,
7834 mss, vlan)) {
7835 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7836 dev_kfree_skb_any(new_skb);
7837 ret = -1;
7842 dev_consume_skb_any(skb);
7843 *pskb = new_skb;
7844 return ret;
7847 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7849 /* Check if we will never have enough descriptors,
7850 * as gso_segs can be more than current ring size
7851 */
7852 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7853 }
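/* Example of the heuristic above, assuming the default tx_pending of
 * 511: a GSO packet is only let through if it has fewer than
 * 511 / 3 == 170 segments, matching the worst-case estimate of roughly
 * three descriptors per segment used in tg3_tso_bug() below.
 */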
7855 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7857 /* Use GSO to work around all TSO packets that meet HW bug conditions
7858 * indicated in tg3_tx_frag_set()
7859 */
7860 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7861 struct netdev_queue *txq, struct sk_buff *skb)
7863 struct sk_buff *segs, *nskb;
7864 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7866 /* Estimate the number of fragments in the worst case */
7867 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7868 netif_tx_stop_queue(txq);
7870 /* netif_tx_stop_queue() must be done before checking
7871 * tx index in tg3_tx_avail() below, because in
7872 * tg3_tx(), we update tx index before checking for
7873 * netif_tx_queue_stopped().
7874 */
7875 smp_mb();
7876 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7877 return NETDEV_TX_BUSY;
7879 netif_tx_wake_queue(txq);
7882 segs = skb_gso_segment(skb, tp->dev->features &
7883 ~(NETIF_F_TSO | NETIF_F_TSO6));
7884 if (IS_ERR(segs) || !segs)
7885 goto tg3_tso_bug_end;
7887 do {
7888 nskb = segs;
7889 segs = segs->next;
7890 nskb->next = NULL;
7891 tg3_start_xmit(nskb, tp->dev);
7892 } while (segs);
7894 tg3_tso_bug_end:
7895 dev_consume_skb_any(skb);
7897 return NETDEV_TX_OK;
7900 /* hard_start_xmit for all devices */
7901 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7903 struct tg3 *tp = netdev_priv(dev);
7904 u32 len, entry, base_flags, mss, vlan = 0;
7905 u32 budget;
7906 int i = -1, would_hit_hwbug;
7907 dma_addr_t mapping;
7908 struct tg3_napi *tnapi;
7909 struct netdev_queue *txq;
7910 unsigned int last;
7911 struct iphdr *iph = NULL;
7912 struct tcphdr *tcph = NULL;
7913 __sum16 tcp_csum = 0, ip_csum = 0;
7914 __be16 ip_tot_len = 0;
7916 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7917 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7918 if (tg3_flag(tp, ENABLE_TSS))
7919 tnapi++;
7921 budget = tg3_tx_avail(tnapi);
7923 /* We are running in BH disabled context with netif_tx_lock
7924 * and TX reclaim runs via tp->napi.poll inside of a software
7925 * interrupt. Furthermore, IRQ processing runs lockless so we have
7926 * no IRQ context deadlocks to worry about either. Rejoice!
7927 */
7928 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7929 if (!netif_tx_queue_stopped(txq)) {
7930 netif_tx_stop_queue(txq);
7932 /* This is a hard error, log it. */
7933 netdev_err(dev,
7934 "BUG! Tx Ring full when queue awake!\n");
7936 return NETDEV_TX_BUSY;
7939 entry = tnapi->tx_prod;
7940 base_flags = 0;
7942 mss = skb_shinfo(skb)->gso_size;
7943 if (mss) {
7944 u32 tcp_opt_len, hdr_len;
7946 if (skb_cow_head(skb, 0))
7947 goto drop;
7949 iph = ip_hdr(skb);
7950 tcp_opt_len = tcp_optlen(skb);
7952 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7954 /* HW/FW cannot correctly segment packets that have been
7955 * vlan encapsulated.
7956 */
7957 if (skb->protocol == htons(ETH_P_8021Q) ||
7958 skb->protocol == htons(ETH_P_8021AD)) {
7959 if (tg3_tso_bug_gso_check(tnapi, skb))
7960 return tg3_tso_bug(tp, tnapi, txq, skb);
7961 goto drop;
7964 if (!skb_is_gso_v6(skb)) {
7965 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7966 tg3_flag(tp, TSO_BUG)) {
7967 if (tg3_tso_bug_gso_check(tnapi, skb))
7968 return tg3_tso_bug(tp, tnapi, txq, skb);
7969 goto drop;
7971 ip_csum = iph->check;
7972 ip_tot_len = iph->tot_len;
7973 iph->check = 0;
7974 iph->tot_len = htons(mss + hdr_len);
7977 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7978 TXD_FLAG_CPU_POST_DMA);
7980 tcph = tcp_hdr(skb);
7981 tcp_csum = tcph->check;
7983 if (tg3_flag(tp, HW_TSO_1) ||
7984 tg3_flag(tp, HW_TSO_2) ||
7985 tg3_flag(tp, HW_TSO_3)) {
7986 tcph->check = 0;
7987 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7988 } else {
7989 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7990 0, IPPROTO_TCP, 0);
7993 if (tg3_flag(tp, HW_TSO_3)) {
7994 mss |= (hdr_len & 0xc) << 12;
7995 if (hdr_len & 0x10)
7996 base_flags |= 0x00000010;
7997 base_flags |= (hdr_len & 0x3e0) << 5;
7998 } else if (tg3_flag(tp, HW_TSO_2))
7999 mss |= hdr_len << 9;
8000 else if (tg3_flag(tp, HW_TSO_1) ||
8001 tg3_asic_rev(tp) == ASIC_REV_5705) {
8002 if (tcp_opt_len || iph->ihl > 5) {
8003 int tsflags;
8005 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8006 mss |= (tsflags << 11);
8008 } else {
8009 if (tcp_opt_len || iph->ihl > 5) {
8010 int tsflags;
8012 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8013 base_flags |= tsflags << 12;
8016 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8017 /* HW/FW cannot correctly checksum packets that have been
8018 * vlan encapsulated.
8019 */
8020 if (skb->protocol == htons(ETH_P_8021Q) ||
8021 skb->protocol == htons(ETH_P_8021AD)) {
8022 if (skb_checksum_help(skb))
8023 goto drop;
8024 } else {
8025 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8029 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8030 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8031 base_flags |= TXD_FLAG_JMB_PKT;
8033 if (skb_vlan_tag_present(skb)) {
8034 base_flags |= TXD_FLAG_VLAN;
8035 vlan = skb_vlan_tag_get(skb);
8038 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8039 tg3_flag(tp, TX_TSTAMP_EN)) {
8040 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8041 base_flags |= TXD_FLAG_HWTSTAMP;
8044 len = skb_headlen(skb);
8046 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8047 if (pci_dma_mapping_error(tp->pdev, mapping))
8048 goto drop;
8051 tnapi->tx_buffers[entry].skb = skb;
8052 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8054 would_hit_hwbug = 0;
8056 if (tg3_flag(tp, 5701_DMA_BUG))
8057 would_hit_hwbug = 1;
8059 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8060 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8061 mss, vlan)) {
8062 would_hit_hwbug = 1;
8063 } else if (skb_shinfo(skb)->nr_frags > 0) {
8064 u32 tmp_mss = mss;
8066 if (!tg3_flag(tp, HW_TSO_1) &&
8067 !tg3_flag(tp, HW_TSO_2) &&
8068 !tg3_flag(tp, HW_TSO_3))
8069 tmp_mss = 0;
8071 /* Now loop through additional data
8072 * fragments, and queue them.
8073 */
8074 last = skb_shinfo(skb)->nr_frags - 1;
8075 for (i = 0; i <= last; i++) {
8076 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8078 len = skb_frag_size(frag);
8079 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8080 len, DMA_TO_DEVICE);
8082 tnapi->tx_buffers[entry].skb = NULL;
8083 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8084 mapping);
8085 if (dma_mapping_error(&tp->pdev->dev, mapping))
8086 goto dma_error;
8088 if (!budget ||
8089 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8090 len, base_flags |
8091 ((i == last) ? TXD_FLAG_END : 0),
8092 tmp_mss, vlan)) {
8093 would_hit_hwbug = 1;
8094 break;
8099 if (would_hit_hwbug) {
8100 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8102 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8103 /* If it's a TSO packet, do GSO instead of
8104 * allocating and copying to a large linear SKB
8105 */
8106 if (ip_tot_len) {
8107 iph->check = ip_csum;
8108 iph->tot_len = ip_tot_len;
8110 tcph->check = tcp_csum;
8111 return tg3_tso_bug(tp, tnapi, txq, skb);
8114 /* If the workaround fails due to memory/mapping
8115 * failure, silently drop this packet.
8116 */
8117 entry = tnapi->tx_prod;
8118 budget = tg3_tx_avail(tnapi);
8119 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8120 base_flags, mss, vlan))
8121 goto drop_nofree;
8124 skb_tx_timestamp(skb);
8125 netdev_tx_sent_queue(txq, skb->len);
8127 /* Sync BD data before updating mailbox */
8128 wmb();
8130 tnapi->tx_prod = entry;
8131 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8132 netif_tx_stop_queue(txq);
8134 /* netif_tx_stop_queue() must be done before checking
8135 * tx index in tg3_tx_avail() below, because in
8136 * tg3_tx(), we update tx index before checking for
8137 * netif_tx_queue_stopped().
8138 */
8139 smp_mb();
8140 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8141 netif_tx_wake_queue(txq);
8144 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8145 /* Packets are ready, update Tx producer idx on card. */
8146 tw32_tx_mbox(tnapi->prodmbox, entry);
8147 mmiowb();
8150 return NETDEV_TX_OK;
8152 dma_error:
8153 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8154 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8155 drop:
8156 dev_kfree_skb_any(skb);
8157 drop_nofree:
8158 tp->tx_dropped++;
8159 return NETDEV_TX_OK;
8162 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8164 if (enable) {
8165 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8166 MAC_MODE_PORT_MODE_MASK);
8168 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8170 if (!tg3_flag(tp, 5705_PLUS))
8171 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8173 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8174 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8175 else
8176 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8177 } else {
8178 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8180 if (tg3_flag(tp, 5705_PLUS) ||
8181 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8182 tg3_asic_rev(tp) == ASIC_REV_5700)
8183 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8186 tw32(MAC_MODE, tp->mac_mode);
8187 udelay(40);
8190 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8192 u32 val, bmcr, mac_mode, ptest = 0;
8194 tg3_phy_toggle_apd(tp, false);
8195 tg3_phy_toggle_automdix(tp, false);
8197 if (extlpbk && tg3_phy_set_extloopbk(tp))
8198 return -EIO;
8200 bmcr = BMCR_FULLDPLX;
8201 switch (speed) {
8202 case SPEED_10:
8203 break;
8204 case SPEED_100:
8205 bmcr |= BMCR_SPEED100;
8206 break;
8207 case SPEED_1000:
8208 default:
8209 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8210 speed = SPEED_100;
8211 bmcr |= BMCR_SPEED100;
8212 } else {
8213 speed = SPEED_1000;
8214 bmcr |= BMCR_SPEED1000;
8218 if (extlpbk) {
8219 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8220 tg3_readphy(tp, MII_CTRL1000, &val);
8221 val |= CTL1000_AS_MASTER |
8222 CTL1000_ENABLE_MASTER;
8223 tg3_writephy(tp, MII_CTRL1000, val);
8224 } else {
8225 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8226 MII_TG3_FET_PTEST_TRIM_2;
8227 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8229 } else
8230 bmcr |= BMCR_LOOPBACK;
8232 tg3_writephy(tp, MII_BMCR, bmcr);
8234 /* The write needs to be flushed for the FETs */
8235 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8236 tg3_readphy(tp, MII_BMCR, &bmcr);
8238 udelay(40);
8240 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8241 tg3_asic_rev(tp) == ASIC_REV_5785) {
8242 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8243 MII_TG3_FET_PTEST_FRC_TX_LINK |
8244 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8246 /* The write needs to be flushed for the AC131 */
8247 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8250 /* Reset to prevent losing 1st rx packet intermittently */
8251 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8252 tg3_flag(tp, 5780_CLASS)) {
8253 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8254 udelay(10);
8255 tw32_f(MAC_RX_MODE, tp->rx_mode);
8258 mac_mode = tp->mac_mode &
8259 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8260 if (speed == SPEED_1000)
8261 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8262 else
8263 mac_mode |= MAC_MODE_PORT_MODE_MII;
8265 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8266 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8268 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8269 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8270 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8271 mac_mode |= MAC_MODE_LINK_POLARITY;
8273 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8274 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8277 tw32(MAC_MODE, mac_mode);
8278 udelay(40);
8280 return 0;
8283 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8285 struct tg3 *tp = netdev_priv(dev);
8287 if (features & NETIF_F_LOOPBACK) {
8288 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8289 return;
8291 spin_lock_bh(&tp->lock);
8292 tg3_mac_loopback(tp, true);
8293 netif_carrier_on(tp->dev);
8294 spin_unlock_bh(&tp->lock);
8295 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8296 } else {
8297 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8298 return;
8300 spin_lock_bh(&tp->lock);
8301 tg3_mac_loopback(tp, false);
8302 /* Force link status check */
8303 tg3_setup_phy(tp, true);
8304 spin_unlock_bh(&tp->lock);
8305 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8309 static netdev_features_t tg3_fix_features(struct net_device *dev,
8310 netdev_features_t features)
8312 struct tg3 *tp = netdev_priv(dev);
8314 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8315 features &= ~NETIF_F_ALL_TSO;
8317 return features;
8320 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8322 netdev_features_t changed = dev->features ^ features;
8324 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8325 tg3_set_loopback(dev, features);
8327 return 0;
8328 }
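/* The NETIF_F_LOOPBACK toggle handled above is normally driven from user
 * space via ethtool, e.g. (assuming the feature string "loopback" and an
 * interface named eth0):
 *
 *	# ethtool -K eth0 loopback on
 *	# ethtool -K eth0 loopback off
 *
 * which reaches tg3_set_loopback() through ndo_set_features while the
 * device is running.
 */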
8330 static void tg3_rx_prodring_free(struct tg3 *tp,
8331 struct tg3_rx_prodring_set *tpr)
8333 int i;
8335 if (tpr != &tp->napi[0].prodring) {
8336 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8337 i = (i + 1) & tp->rx_std_ring_mask)
8338 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8339 tp->rx_pkt_map_sz);
8341 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8342 for (i = tpr->rx_jmb_cons_idx;
8343 i != tpr->rx_jmb_prod_idx;
8344 i = (i + 1) & tp->rx_jmb_ring_mask) {
8345 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8346 TG3_RX_JMB_MAP_SZ);
8350 return;
8353 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8354 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8355 tp->rx_pkt_map_sz);
8357 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8358 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8359 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8360 TG3_RX_JMB_MAP_SZ);
8364 /* Initialize rx rings for packet processing.
8365 *
8366 * The chip has been shut down and the driver detached from
8367 * the networking, so no interrupts or new tx packets will
8368 * end up in the driver. tp->{tx,}lock are held and thus
8369 * we may not sleep.
8370 */
8371 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8372 struct tg3_rx_prodring_set *tpr)
8374 u32 i, rx_pkt_dma_sz;
8376 tpr->rx_std_cons_idx = 0;
8377 tpr->rx_std_prod_idx = 0;
8378 tpr->rx_jmb_cons_idx = 0;
8379 tpr->rx_jmb_prod_idx = 0;
8381 if (tpr != &tp->napi[0].prodring) {
8382 memset(&tpr->rx_std_buffers[0], 0,
8383 TG3_RX_STD_BUFF_RING_SIZE(tp));
8384 if (tpr->rx_jmb_buffers)
8385 memset(&tpr->rx_jmb_buffers[0], 0,
8386 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8387 goto done;
8390 /* Zero out all descriptors. */
8391 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8393 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8394 if (tg3_flag(tp, 5780_CLASS) &&
8395 tp->dev->mtu > ETH_DATA_LEN)
8396 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8397 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8399 /* Initialize invariants of the rings, we only set this
8400 * stuff once. This works because the card does not
8401 * write into the rx buffer posting rings.
8402 */
8403 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8404 struct tg3_rx_buffer_desc *rxd;
8406 rxd = &tpr->rx_std[i];
8407 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8408 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8409 rxd->opaque = (RXD_OPAQUE_RING_STD |
8410 (i << RXD_OPAQUE_INDEX_SHIFT));
8413 /* Now allocate fresh SKBs for each rx ring. */
8414 for (i = 0; i < tp->rx_pending; i++) {
8415 unsigned int frag_size;
8417 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8418 &frag_size) < 0) {
8419 netdev_warn(tp->dev,
8420 "Using a smaller RX standard ring. Only "
8421 "%d out of %d buffers were allocated "
8422 "successfully\n", i, tp->rx_pending);
8423 if (i == 0)
8424 goto initfail;
8425 tp->rx_pending = i;
8426 break;
8427 }
8428 }
8430 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8431 goto done;
8433 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8435 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8436 goto done;
8438 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8439 struct tg3_rx_buffer_desc *rxd;
8441 rxd = &tpr->rx_jmb[i].std;
8442 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8443 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8444 RXD_FLAG_JUMBO;
8445 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8446 (i << RXD_OPAQUE_INDEX_SHIFT));
8447 }
8449 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8450 unsigned int frag_size;
8452 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8453 &frag_size) < 0) {
8454 netdev_warn(tp->dev,
8455 "Using a smaller RX jumbo ring. Only %d "
8456 "out of %d buffers were allocated "
8457 "successfully\n", i, tp->rx_jumbo_pending);
8458 if (i == 0)
8459 goto initfail;
8460 tp->rx_jumbo_pending = i;
8461 break;
8462 }
8463 }
8465 done:
8466 return 0;
8468 initfail:
8469 tg3_rx_prodring_free(tp, tpr);
8470 return -ENOMEM;
8471 }
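/* The consumer/producer walks in tg3_rx_prodring_free() above rely on
 * the ring sizes being powers of two: i = (i + 1) & tp->rx_std_ring_mask
 * wraps without a divide.  Worked example with a 512-entry standard
 * ring (mask 0x1ff): i = 510 -> 511 -> (512 & 0x1ff) = 0.
 */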
8473 static void tg3_rx_prodring_fini(struct tg3 *tp,
8474 struct tg3_rx_prodring_set *tpr)
8475 {
8476 kfree(tpr->rx_std_buffers);
8477 tpr->rx_std_buffers = NULL;
8478 kfree(tpr->rx_jmb_buffers);
8479 tpr->rx_jmb_buffers = NULL;
8480 if (tpr->rx_std) {
8481 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8482 tpr->rx_std, tpr->rx_std_mapping);
8483 tpr->rx_std = NULL;
8484 }
8485 if (tpr->rx_jmb) {
8486 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8487 tpr->rx_jmb, tpr->rx_jmb_mapping);
8488 tpr->rx_jmb = NULL;
8489 }
8490 }
8492 static int tg3_rx_prodring_init(struct tg3 *tp,
8493 struct tg3_rx_prodring_set *tpr)
8494 {
8495 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8496 GFP_KERNEL);
8497 if (!tpr->rx_std_buffers)
8498 return -ENOMEM;
8500 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8501 TG3_RX_STD_RING_BYTES(tp),
8502 &tpr->rx_std_mapping,
8503 GFP_KERNEL);
8504 if (!tpr->rx_std)
8505 goto err_out;
8507 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8508 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8509 GFP_KERNEL);
8510 if (!tpr->rx_jmb_buffers)
8511 goto err_out;
8513 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8514 TG3_RX_JMB_RING_BYTES(tp),
8515 &tpr->rx_jmb_mapping,
8516 GFP_KERNEL);
8517 if (!tpr->rx_jmb)
8518 goto err_out;
8519 }
8521 return 0;
8523 err_out:
8524 tg3_rx_prodring_fini(tp, tpr);
8525 return -ENOMEM;
8526 }
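/* Allocation split used above: rx_std/rx_jmb are descriptor rings that
 * the NIC reads directly, hence dma_alloc_coherent(); the *_buffers
 * arrays are host-side bookkeeping only (buffer pointers plus their
 * DMA handles), hence plain kzalloc().
 */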
8528 /* Free up pending packets in all rx/tx rings.
8530 * The chip has been shut down and the driver detached from
8531 * the networking stack, so no interrupts or new tx packets will
8532 * end up in the driver. tp->{tx,}lock is not held and we are not
8533 * in an interrupt context and thus may sleep.
8534 */
8535 static void tg3_free_rings(struct tg3 *tp)
8536 {
8537 int i, j;
8539 for (j = 0; j < tp->irq_cnt; j++) {
8540 struct tg3_napi *tnapi = &tp->napi[j];
8542 tg3_rx_prodring_free(tp, &tnapi->prodring);
8544 if (!tnapi->tx_buffers)
8545 continue;
8547 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8548 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8550 if (!skb)
8551 continue;
8553 tg3_tx_skb_unmap(tnapi, i,
8554 skb_shinfo(skb)->nr_frags - 1);
8556 dev_consume_skb_any(skb);
8557 }
8558 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8559 }
8560 }
8562 /* Initialize tx/rx rings for packet processing.
8564 * The chip has been shut down and the driver detached from
8565 * the networking stack, so no interrupts or new tx packets will
8566 * end up in the driver. tp->{tx,}lock are held and thus
8567 * we may not sleep.
8568 */
8569 static int tg3_init_rings(struct tg3 *tp)
8570 {
8571 int i;
8573 /* Free up all the SKBs. */
8574 tg3_free_rings(tp);
8576 for (i = 0; i < tp->irq_cnt; i++) {
8577 struct tg3_napi *tnapi = &tp->napi[i];
8579 tnapi->last_tag = 0;
8580 tnapi->last_irq_tag = 0;
8581 tnapi->hw_status->status = 0;
8582 tnapi->hw_status->status_tag = 0;
8583 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8585 tnapi->tx_prod = 0;
8586 tnapi->tx_cons = 0;
8587 if (tnapi->tx_ring)
8588 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8590 tnapi->rx_rcb_ptr = 0;
8591 if (tnapi->rx_rcb)
8592 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8594 if (tnapi->prodring.rx_std &&
8595 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8596 tg3_free_rings(tp);
8597 return -ENOMEM;
8598 }
8599 }
8601 return 0;
8602 }
8604 static void tg3_mem_tx_release(struct tg3 *tp)
8605 {
8606 int i;
8608 for (i = 0; i < tp->irq_max; i++) {
8609 struct tg3_napi *tnapi = &tp->napi[i];
8611 if (tnapi->tx_ring) {
8612 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8613 tnapi->tx_ring, tnapi->tx_desc_mapping);
8614 tnapi->tx_ring = NULL;
8615 }
8617 kfree(tnapi->tx_buffers);
8618 tnapi->tx_buffers = NULL;
8619 }
8620 }
8622 static int tg3_mem_tx_acquire(struct tg3 *tp)
8623 {
8624 int i;
8625 struct tg3_napi *tnapi = &tp->napi[0];
8627 /* If multivector TSS is enabled, vector 0 does not handle
8628 * tx interrupts. Don't allocate any resources for it.
8629 */
8630 if (tg3_flag(tp, ENABLE_TSS))
8631 tnapi++;
8633 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8634 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8635 TG3_TX_RING_SIZE, GFP_KERNEL);
8636 if (!tnapi->tx_buffers)
8637 goto err_out;
8639 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8640 TG3_TX_RING_BYTES,
8641 &tnapi->tx_desc_mapping,
8642 GFP_KERNEL);
8643 if (!tnapi->tx_ring)
8644 goto err_out;
8645 }
8647 return 0;
8649 err_out:
8650 tg3_mem_tx_release(tp);
8651 return -ENOMEM;
8652 }
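/* Illustration of the TSS offset above: with ENABLE_TSS set and
 * tp->txq_cnt == 4, tx rings land on tp->napi[1]..tp->napi[4] and
 * vector 0 is left for link and error events; without TSS the single
 * tx ring sits on tp->napi[0].
 */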
8654 static void tg3_mem_rx_release(struct tg3 *tp)
8655 {
8656 int i;
8658 for (i = 0; i < tp->irq_max; i++) {
8659 struct tg3_napi *tnapi = &tp->napi[i];
8661 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8663 if (!tnapi->rx_rcb)
8664 continue;
8666 dma_free_coherent(&tp->pdev->dev,
8667 TG3_RX_RCB_RING_BYTES(tp),
8668 tnapi->rx_rcb,
8669 tnapi->rx_rcb_mapping);
8670 tnapi->rx_rcb = NULL;
8671 }
8672 }
8674 static int tg3_mem_rx_acquire(struct tg3 *tp)
8675 {
8676 unsigned int i, limit;
8678 limit = tp->rxq_cnt;
8680 /* If RSS is enabled, we need a (dummy) producer ring
8681 * set on vector zero. This is the true hw prodring.
8682 */
8683 if (tg3_flag(tp, ENABLE_RSS))
8684 limit++;
8686 for (i = 0; i < limit; i++) {
8687 struct tg3_napi *tnapi = &tp->napi[i];
8689 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8690 goto err_out;
8692 /* If multivector RSS is enabled, vector 0
8693 * does not handle rx or tx interrupts.
8694 * Don't allocate any resources for it.
8695 */
8696 if (!i && tg3_flag(tp, ENABLE_RSS))
8697 continue;
8699 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8700 TG3_RX_RCB_RING_BYTES(tp),
8701 &tnapi->rx_rcb_mapping,
8702 GFP_KERNEL);
8703 if (!tnapi->rx_rcb)
8704 goto err_out;
8705 }
8707 return 0;
8709 err_out:
8710 tg3_mem_rx_release(tp);
8711 return -ENOMEM;
8712 }
8714 /*
8715 * Must not be invoked with interrupt sources disabled and
8716 * the hardware shut down.
8717 */
8718 static void tg3_free_consistent(struct tg3 *tp)
8719 {
8720 int i;
8722 for (i = 0; i < tp->irq_cnt; i++) {
8723 struct tg3_napi *tnapi = &tp->napi[i];
8725 if (tnapi->hw_status) {
8726 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8727 tnapi->hw_status,
8728 tnapi->status_mapping);
8729 tnapi->hw_status = NULL;
8730 }
8731 }
8733 tg3_mem_rx_release(tp);
8734 tg3_mem_tx_release(tp);
8736 /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
8737 tg3_full_lock(tp, 0);
8738 if (tp->hw_stats) {
8739 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8740 tp->hw_stats, tp->stats_mapping);
8741 tp->hw_stats = NULL;
8742 }
8743 tg3_full_unlock(tp);
8744 }
8746 /*
8747 * Must not be invoked with interrupt sources disabled and
8748 * the hardware shut down. Can sleep.
8749 */
8750 static int tg3_alloc_consistent(struct tg3 *tp)
8751 {
8752 int i;
8754 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8755 sizeof(struct tg3_hw_stats),
8756 &tp->stats_mapping, GFP_KERNEL);
8757 if (!tp->hw_stats)
8758 goto err_out;
8760 for (i = 0; i < tp->irq_cnt; i++) {
8761 struct tg3_napi *tnapi = &tp->napi[i];
8762 struct tg3_hw_status *sblk;
8764 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8765 TG3_HW_STATUS_SIZE,
8766 &tnapi->status_mapping,
8767 GFP_KERNEL);
8768 if (!tnapi->hw_status)
8769 goto err_out;
8771 sblk = tnapi->hw_status;
8773 if (tg3_flag(tp, ENABLE_RSS)) {
8774 u16 *prodptr = NULL;
8776 /*
8777 * When RSS is enabled, the status block format changes
8778 * slightly. The "rx_jumbo_consumer", "reserved",
8779 * and "rx_mini_consumer" members get mapped to the
8780 * other three rx return ring producer indexes.
8781 */
8782 switch (i) {
8783 case 1:
8784 prodptr = &sblk->idx[0].rx_producer;
8785 break;
8786 case 2:
8787 prodptr = &sblk->rx_jumbo_consumer;
8788 break;
8789 case 3:
8790 prodptr = &sblk->reserved;
8791 break;
8792 case 4:
8793 prodptr = &sblk->rx_mini_consumer;
8794 break;
8795 }
8796 tnapi->rx_rcb_prod_idx = prodptr;
8797 } else {
8798 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8799 }
8800 }
8802 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8803 goto err_out;
8805 return 0;
8807 err_out:
8808 tg3_free_consistent(tp);
8809 return -ENOMEM;
8810 }
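/* Worked example of the RSS remapping above: the chip reports up to
 * four rx return ring producer indices through one status block field
 * per vector, so vector 1 reads idx[0].rx_producer while vectors 2-4
 * reuse the rx_jumbo_consumer, reserved and rx_mini_consumer fields,
 * which carry no other meaning when RSS is enabled.
 */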
8812 #define MAX_WAIT_CNT 1000
8814 /* To stop a block, clear the enable bit and poll until it
8815 * clears. tp->lock is held.
8816 */
8817 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8818 {
8819 unsigned int i;
8820 u32 val;
8822 if (tg3_flag(tp, 5705_PLUS)) {
8823 switch (ofs) {
8824 case RCVLSC_MODE:
8825 case DMAC_MODE:
8826 case MBFREE_MODE:
8827 case BUFMGR_MODE:
8828 case MEMARB_MODE:
8829 /* We can't enable/disable these bits of the
8830 * 5705/5750, just say success.
8831 */
8832 return 0;
8834 default:
8835 break;
8836 }
8837 }
8839 val = tr32(ofs);
8840 val &= ~enable_bit;
8841 tw32_f(ofs, val);
8843 for (i = 0; i < MAX_WAIT_CNT; i++) {
8844 if (pci_channel_offline(tp->pdev)) {
8845 dev_err(&tp->pdev->dev,
8846 "tg3_stop_block device offline, "
8847 "ofs=%lx enable_bit=%x\n",
8848 ofs, enable_bit);
8849 return -ENODEV;
8850 }
8852 udelay(100);
8853 val = tr32(ofs);
8854 if ((val & enable_bit) == 0)
8855 break;
8856 }
8858 if (i == MAX_WAIT_CNT && !silent) {
8859 dev_err(&tp->pdev->dev,
8860 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8861 ofs, enable_bit);
8862 return -ENODEV;
8863 }
8865 return 0;
8866 }
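/* Timeout arithmetic for the poll loop above: MAX_WAIT_CNT (1000)
 * iterations of udelay(100) bound the wait at about
 * 1000 * 100 us = 100 ms before the block is reported stuck.
 */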
8868 /* tp->lock is held. */
8869 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8870 {
8871 int i, err;
8873 tg3_disable_ints(tp);
8875 if (pci_channel_offline(tp->pdev)) {
8876 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8877 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8878 err = -ENODEV;
8879 goto err_no_dev;
8880 }
8882 tp->rx_mode &= ~RX_MODE_ENABLE;
8883 tw32_f(MAC_RX_MODE, tp->rx_mode);
8884 udelay(10);
8886 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8887 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8888 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8889 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8890 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8891 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8893 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8894 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8895 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8896 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8897 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8898 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8899 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8901 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8902 tw32_f(MAC_MODE, tp->mac_mode);
8903 udelay(40);
8905 tp->tx_mode &= ~TX_MODE_ENABLE;
8906 tw32_f(MAC_TX_MODE, tp->tx_mode);
8908 for (i = 0; i < MAX_WAIT_CNT; i++) {
8909 udelay(100);
8910 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8911 break;
8912 }
8913 if (i >= MAX_WAIT_CNT) {
8914 dev_err(&tp->pdev->dev,
8915 "%s timed out, TX_MODE_ENABLE will not clear "
8916 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8917 err |= -ENODEV;
8918 }
8920 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8921 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8922 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8924 tw32(FTQ_RESET, 0xffffffff);
8925 tw32(FTQ_RESET, 0x00000000);
8927 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8928 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8930 err_no_dev:
8931 for (i = 0; i < tp->irq_cnt; i++) {
8932 struct tg3_napi *tnapi = &tp->napi[i];
8933 if (tnapi->hw_status)
8934 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8935 }
8937 return err;
8938 }
8940 /* Save PCI command register before chip reset */
8941 static void tg3_save_pci_state(struct tg3 *tp)
8942 {
8943 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8944 }
8946 /* Restore PCI state after chip reset */
8947 static void tg3_restore_pci_state(struct tg3 *tp)
8948 {
8949 u32 val;
8951 /* Re-enable indirect register accesses. */
8952 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8953 tp->misc_host_ctrl);
8955 /* Set MAX PCI retry to zero. */
8956 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8957 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8958 tg3_flag(tp, PCIX_MODE))
8959 val |= PCISTATE_RETRY_SAME_DMA;
8960 /* Allow reads and writes to the APE register and memory space. */
8961 if (tg3_flag(tp, ENABLE_APE))
8962 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8963 PCISTATE_ALLOW_APE_SHMEM_WR |
8964 PCISTATE_ALLOW_APE_PSPACE_WR;
8965 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8967 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8969 if (!tg3_flag(tp, PCI_EXPRESS)) {
8970 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8971 tp->pci_cacheline_sz);
8972 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8973 tp->pci_lat_timer);
8974 }
8976 /* Make sure PCI-X relaxed ordering bit is clear. */
8977 if (tg3_flag(tp, PCIX_MODE)) {
8978 u16 pcix_cmd;
8980 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8981 &pcix_cmd);
8982 pcix_cmd &= ~PCI_X_CMD_ERO;
8983 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8984 pcix_cmd);
8985 }
8987 if (tg3_flag(tp, 5780_CLASS)) {
8989 /* Chip reset on 5780 will reset MSI enable bit,
8990 * so we need to restore it.
8991 */
8992 if (tg3_flag(tp, USING_MSI)) {
8993 u16 ctrl;
8995 pci_read_config_word(tp->pdev,
8996 tp->msi_cap + PCI_MSI_FLAGS,
8997 &ctrl);
8998 pci_write_config_word(tp->pdev,
8999 tp->msi_cap + PCI_MSI_FLAGS,
9000 ctrl | PCI_MSI_FLAGS_ENABLE);
9001 val = tr32(MSGINT_MODE);
9002 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9003 }
9004 }
9005 }
9007 static void tg3_override_clk(struct tg3 *tp)
9008 {
9009 u32 val;
9011 switch (tg3_asic_rev(tp)) {
9012 case ASIC_REV_5717:
9013 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9014 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9015 TG3_CPMU_MAC_ORIDE_ENABLE);
9016 break;
9018 case ASIC_REV_5719:
9019 case ASIC_REV_5720:
9020 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9021 break;
9023 default:
9024 return;
9025 }
9026 }
9028 static void tg3_restore_clk(struct tg3 *tp)
9029 {
9030 u32 val;
9032 switch (tg3_asic_rev(tp)) {
9033 case ASIC_REV_5717:
9034 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9035 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9036 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9037 break;
9039 case ASIC_REV_5719:
9040 case ASIC_REV_5720:
9041 val = tr32(TG3_CPMU_CLCK_ORIDE);
9042 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9043 break;
9045 default:
9046 return;
9047 }
9048 }
9050 /* tp->lock is held. */
9051 static int tg3_chip_reset(struct tg3 *tp)
9052 __releases(tp->lock)
9053 __acquires(tp->lock)
9054 {
9055 u32 val;
9056 void (*write_op)(struct tg3 *, u32, u32);
9057 int i, err;
9059 if (!pci_device_is_present(tp->pdev))
9060 return -ENODEV;
9062 tg3_nvram_lock(tp);
9064 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9066 /* No matching tg3_nvram_unlock() after this because
9067 * chip reset below will undo the nvram lock.
9068 */
9069 tp->nvram_lock_cnt = 0;
9071 /* GRC_MISC_CFG core clock reset will clear the memory
9072 * enable bit in PCI register 4 and the MSI enable bit
9073 * on some chips, so we save relevant registers here.
9074 */
9075 tg3_save_pci_state(tp);
9077 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9078 tg3_flag(tp, 5755_PLUS))
9079 tw32(GRC_FASTBOOT_PC, 0);
9081 /*
9082 * We must avoid the readl() that normally takes place.
9083 * It locks machines, causes machine checks, and other
9084 * fun things. So, temporarily disable the 5701
9085 * hardware workaround, while we do the reset.
9086 */
9087 write_op = tp->write32;
9088 if (write_op == tg3_write_flush_reg32)
9089 tp->write32 = tg3_write32;
9091 /* Prevent the irq handler from reading or writing PCI registers
9092 * during chip reset when the memory enable bit in the PCI command
9093 * register may be cleared. The chip does not generate interrupt
9094 * at this time, but the irq handler may still be called due to irq
9095 * sharing or irqpoll.
9096 */
9097 tg3_flag_set(tp, CHIP_RESETTING);
9098 for (i = 0; i < tp->irq_cnt; i++) {
9099 struct tg3_napi *tnapi = &tp->napi[i];
9100 if (tnapi->hw_status) {
9101 tnapi->hw_status->status = 0;
9102 tnapi->hw_status->status_tag = 0;
9103 }
9104 tnapi->last_tag = 0;
9105 tnapi->last_irq_tag = 0;
9106 }
9107 smp_mb();
9109 tg3_full_unlock(tp);
9111 for (i = 0; i < tp->irq_cnt; i++)
9112 synchronize_irq(tp->napi[i].irq_vec);
9114 tg3_full_lock(tp, 0);
9116 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9117 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9118 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9119 }
9121 /* do the reset */
9122 val = GRC_MISC_CFG_CORECLK_RESET;
9124 if (tg3_flag(tp, PCI_EXPRESS)) {
9125 /* Force PCIe 1.0a mode */
9126 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9127 !tg3_flag(tp, 57765_PLUS) &&
9128 tr32(TG3_PCIE_PHY_TSTCTL) ==
9129 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9130 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9132 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9133 tw32(GRC_MISC_CFG, (1 << 29));
9134 val |= (1 << 29);
9135 }
9136 }
9138 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9139 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9140 tw32(GRC_VCPU_EXT_CTRL,
9141 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9142 }
9144 /* Set the clock to the highest frequency to avoid timeouts. With link
9145 * aware mode, the clock speed could be slow and bootcode does not
9146 * complete within the expected time. Override the clock to allow the
9147 * bootcode to finish sooner and then restore it.
9148 */
9149 tg3_override_clk(tp);
9151 /* Manage gphy power for all CPMU absent PCIe devices. */
9152 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9153 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9155 tw32(GRC_MISC_CFG, val);
9157 /* restore 5701 hardware bug workaround write method */
9158 tp->write32 = write_op;
9160 /* Unfortunately, we have to delay before the PCI read back.
9161 * Some 575X chips even will not respond to a PCI cfg access
9162 * when the reset command is given to the chip.
9164 * How do these hardware designers expect things to work
9165 * properly if the PCI write is posted for a long period
9166 * of time? It is always necessary to have some method by
9167 * which a register read back can occur to push the write
9168 * out which does the reset.
9170 * For most tg3 variants the trick below was working.
9171 * Ho hum...
9172 */
9173 udelay(120);
9175 /* Flush PCI posted writes. The normal MMIO registers
9176 * are inaccessible at this time so this is the only
9177 * way to do this reliably (actually, this is no longer
9178 * the case, see above). I tried to use indirect
9179 * register read/write but this upset some 5701 variants.
9180 */
9181 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9183 udelay(120);
9185 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9186 u16 val16;
9188 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9189 int j;
9190 u32 cfg_val;
9192 /* Wait for link training to complete. */
9193 for (j = 0; j < 5000; j++)
9194 udelay(100);
9196 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9197 pci_write_config_dword(tp->pdev, 0xc4,
9198 cfg_val | (1 << 15));
9199 }
9201 /* Clear the "no snoop" and "relaxed ordering" bits. */
9202 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9203 /*
9204 * Older PCIe devices only support the 128 byte
9205 * MPS setting. Enforce the restriction.
9206 */
9207 if (!tg3_flag(tp, CPMU_PRESENT))
9208 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9209 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9211 /* Clear error status */
9212 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9213 PCI_EXP_DEVSTA_CED |
9214 PCI_EXP_DEVSTA_NFED |
9215 PCI_EXP_DEVSTA_FED |
9216 PCI_EXP_DEVSTA_URD);
9217 }
9219 tg3_restore_pci_state(tp);
9221 tg3_flag_clear(tp, CHIP_RESETTING);
9222 tg3_flag_clear(tp, ERROR_PROCESSED);
9224 val = 0;
9225 if (tg3_flag(tp, 5780_CLASS))
9226 val = tr32(MEMARB_MODE);
9227 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9229 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9230 tg3_stop_fw(tp);
9231 tw32(0x5000, 0x400);
9232 }
9234 if (tg3_flag(tp, IS_SSB_CORE)) {
9235 /*
9236 * BCM4785: In order to avoid repercussions from using
9237 * potentially defective internal ROM, stop the Rx RISC CPU,
9238 * which is not required.
9239 */
9240 tg3_stop_fw(tp);
9241 tg3_halt_cpu(tp, RX_CPU_BASE);
9242 }
9244 err = tg3_poll_fw(tp);
9245 if (err)
9246 return err;
9248 tw32(GRC_MODE, tp->grc_mode);
9250 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9251 val = tr32(0xc4);
9253 tw32(0xc4, val | (1 << 15));
9254 }
9256 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9257 tg3_asic_rev(tp) == ASIC_REV_5705) {
9258 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9259 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9260 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9261 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9262 }
9264 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9265 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9266 val = tp->mac_mode;
9267 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9268 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9269 val = tp->mac_mode;
9270 } else
9271 val = 0;
9273 tw32_f(MAC_MODE, val);
9274 udelay(40);
9276 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9278 tg3_mdio_start(tp);
9280 if (tg3_flag(tp, PCI_EXPRESS) &&
9281 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9282 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9283 !tg3_flag(tp, 57765_PLUS)) {
9284 val = tr32(0x7c00);
9286 tw32(0x7c00, val | (1 << 25));
9287 }
9289 tg3_restore_clk(tp);
9291 /* Reprobe ASF enable state. */
9292 tg3_flag_clear(tp, ENABLE_ASF);
9293 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9294 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9296 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9297 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9298 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9299 u32 nic_cfg;
9301 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9302 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9303 tg3_flag_set(tp, ENABLE_ASF);
9304 tp->last_event_jiffies = jiffies;
9305 if (tg3_flag(tp, 5750_PLUS))
9306 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9307 }
9308 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9309 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9310 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9311 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9312 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9313 }
9316 return 0;
9317 }
9319 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9320 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9321 static void __tg3_set_rx_mode(struct net_device *);
9323 /* tp->lock is held. */
9324 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9325 {
9326 int err;
9328 tg3_stop_fw(tp);
9330 tg3_write_sig_pre_reset(tp, kind);
9332 tg3_abort_hw(tp, silent);
9333 err = tg3_chip_reset(tp);
9335 __tg3_set_mac_addr(tp, false);
9337 tg3_write_sig_legacy(tp, kind);
9338 tg3_write_sig_post_reset(tp, kind);
9340 if (tp->hw_stats) {
9341 /* Save the stats across chip resets... */
9342 tg3_get_nstats(tp, &tp->net_stats_prev);
9343 tg3_get_estats(tp, &tp->estats_prev);
9345 /* And make sure the next sample is new data */
9346 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9347 }
9349 return err;
9350 }
9352 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9353 {
9354 struct tg3 *tp = netdev_priv(dev);
9355 struct sockaddr *addr = p;
9356 int err = 0;
9357 bool skip_mac_1 = false;
9359 if (!is_valid_ether_addr(addr->sa_data))
9360 return -EADDRNOTAVAIL;
9362 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9364 if (!netif_running(dev))
9365 return 0;
9367 if (tg3_flag(tp, ENABLE_ASF)) {
9368 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9370 addr0_high = tr32(MAC_ADDR_0_HIGH);
9371 addr0_low = tr32(MAC_ADDR_0_LOW);
9372 addr1_high = tr32(MAC_ADDR_1_HIGH);
9373 addr1_low = tr32(MAC_ADDR_1_LOW);
9375 /* Skip MAC addr 1 if ASF is using it. */
9376 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9377 !(addr1_high == 0 && addr1_low == 0))
9378 skip_mac_1 = true;
9379 }
9380 spin_lock_bh(&tp->lock);
9381 __tg3_set_mac_addr(tp, skip_mac_1);
9382 __tg3_set_rx_mode(dev);
9383 spin_unlock_bh(&tp->lock);
9385 return err;
9386 }
9388 /* tp->lock is held. */
9389 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9390 dma_addr_t mapping, u32 maxlen_flags,
9391 u32 nic_addr)
9392 {
9393 tg3_write_mem(tp,
9394 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9395 ((u64) mapping >> 32));
9396 tg3_write_mem(tp,
9397 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9398 ((u64) mapping & 0xffffffff));
9399 tg3_write_mem(tp,
9400 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9401 maxlen_flags);
9403 if (!tg3_flag(tp, 5705_PLUS))
9404 tg3_write_mem(tp,
9405 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9406 nic_addr);
9407 }
9410 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9411 {
9412 int i = 0;
9414 if (!tg3_flag(tp, ENABLE_TSS)) {
9415 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9416 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9417 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9418 } else {
9419 tw32(HOSTCC_TXCOL_TICKS, 0);
9420 tw32(HOSTCC_TXMAX_FRAMES, 0);
9421 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9423 for (; i < tp->txq_cnt; i++) {
9424 u32 reg;
9426 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9427 tw32(reg, ec->tx_coalesce_usecs);
9428 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9429 tw32(reg, ec->tx_max_coalesced_frames);
9430 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9431 tw32(reg, ec->tx_max_coalesced_frames_irq);
9432 }
9433 }
9435 for (; i < tp->irq_max - 1; i++) {
9436 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9437 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9438 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9439 }
9440 }
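/* The per-vector coalescing registers sit at a fixed 0x18-byte stride,
 * so loop iteration i above programs HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18
 * and its companions; tg3_coal_rx_init() below walks the rx side with
 * the same stride.
 */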
9442 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9443 {
9444 int i = 0;
9445 u32 limit = tp->rxq_cnt;
9447 if (!tg3_flag(tp, ENABLE_RSS)) {
9448 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9449 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9450 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9451 limit--;
9452 } else {
9453 tw32(HOSTCC_RXCOL_TICKS, 0);
9454 tw32(HOSTCC_RXMAX_FRAMES, 0);
9455 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9456 }
9458 for (; i < limit; i++) {
9459 u32 reg;
9461 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9462 tw32(reg, ec->rx_coalesce_usecs);
9463 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9464 tw32(reg, ec->rx_max_coalesced_frames);
9465 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9466 tw32(reg, ec->rx_max_coalesced_frames_irq);
9467 }
9469 for (; i < tp->irq_max - 1; i++) {
9470 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9471 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9472 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9473 }
9474 }
9476 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9477 {
9478 tg3_coal_tx_init(tp, ec);
9479 tg3_coal_rx_init(tp, ec);
9481 if (!tg3_flag(tp, 5705_PLUS)) {
9482 u32 val = ec->stats_block_coalesce_usecs;
9484 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9485 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9487 if (!tp->link_up)
9488 val = 0;
9490 tw32(HOSTCC_STAT_COAL_TICKS, val);
9491 }
9492 }
9494 /* tp->lock is held. */
9495 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9496 {
9497 u32 txrcb, limit;
9499 /* Disable all transmit rings but the first. */
9500 if (!tg3_flag(tp, 5705_PLUS))
9501 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9502 else if (tg3_flag(tp, 5717_PLUS))
9503 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9504 else if (tg3_flag(tp, 57765_CLASS) ||
9505 tg3_asic_rev(tp) == ASIC_REV_5762)
9506 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9507 else
9508 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9510 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9511 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9512 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9513 BDINFO_FLAGS_DISABLED);
9514 }
9516 /* tp->lock is held. */
9517 static void tg3_tx_rcbs_init(struct tg3 *tp)
9518 {
9519 int i = 0;
9520 u32 txrcb = NIC_SRAM_SEND_RCB;
9522 if (tg3_flag(tp, ENABLE_TSS))
9523 i++;
9525 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9526 struct tg3_napi *tnapi = &tp->napi[i];
9528 if (!tnapi->tx_ring)
9529 continue;
9531 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9532 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9533 NIC_SRAM_TX_BUFFER_DESC);
9534 }
9535 }
9537 /* tp->lock is held. */
9538 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9539 {
9540 u32 rxrcb, limit;
9542 /* Disable all receive return rings but the first. */
9543 if (tg3_flag(tp, 5717_PLUS))
9544 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9545 else if (!tg3_flag(tp, 5705_PLUS))
9546 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9547 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9548 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9549 tg3_flag(tp, 57765_CLASS))
9550 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9551 else
9552 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9554 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9555 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9556 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9557 BDINFO_FLAGS_DISABLED);
9558 }
9560 /* tp->lock is held. */
9561 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9562 {
9563 int i = 0;
9564 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9566 if (tg3_flag(tp, ENABLE_RSS))
9567 i++;
9569 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9570 struct tg3_napi *tnapi = &tp->napi[i];
9572 if (!tnapi->rx_rcb)
9573 continue;
9575 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9576 (tp->rx_ret_ring_mask + 1) <<
9577 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9578 }
9579 }
9581 /* tp->lock is held. */
9582 static void tg3_rings_reset(struct tg3 *tp)
9583 {
9584 int i;
9585 u32 stblk;
9586 struct tg3_napi *tnapi = &tp->napi[0];
9588 tg3_tx_rcbs_disable(tp);
9590 tg3_rx_ret_rcbs_disable(tp);
9592 /* Disable interrupts */
9593 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9594 tp->napi[0].chk_msi_cnt = 0;
9595 tp->napi[0].last_rx_cons = 0;
9596 tp->napi[0].last_tx_cons = 0;
9598 /* Zero mailbox registers. */
9599 if (tg3_flag(tp, SUPPORT_MSIX)) {
9600 for (i = 1; i < tp->irq_max; i++) {
9601 tp->napi[i].tx_prod = 0;
9602 tp->napi[i].tx_cons = 0;
9603 if (tg3_flag(tp, ENABLE_TSS))
9604 tw32_mailbox(tp->napi[i].prodmbox, 0);
9605 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9606 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9607 tp->napi[i].chk_msi_cnt = 0;
9608 tp->napi[i].last_rx_cons = 0;
9609 tp->napi[i].last_tx_cons = 0;
9610 }
9611 if (!tg3_flag(tp, ENABLE_TSS))
9612 tw32_mailbox(tp->napi[0].prodmbox, 0);
9613 } else {
9614 tp->napi[0].tx_prod = 0;
9615 tp->napi[0].tx_cons = 0;
9616 tw32_mailbox(tp->napi[0].prodmbox, 0);
9617 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9618 }
9620 /* Make sure the NIC-based send BD rings are disabled. */
9621 if (!tg3_flag(tp, 5705_PLUS)) {
9622 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9623 for (i = 0; i < 16; i++)
9624 tw32_tx_mbox(mbox + i * 8, 0);
9625 }
9627 /* Clear status block in ram. */
9628 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9630 /* Set status block DMA address */
9631 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9632 ((u64) tnapi->status_mapping >> 32));
9633 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9634 ((u64) tnapi->status_mapping & 0xffffffff));
9636 stblk = HOSTCC_STATBLCK_RING1;
9638 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9639 u64 mapping = (u64)tnapi->status_mapping;
9640 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9641 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9642 stblk += 8;
9644 /* Clear status block in ram. */
9645 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9646 }
9648 tg3_tx_rcbs_init(tp);
9649 tg3_rx_ret_rcbs_init(tp);
9650 }
9652 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9653 {
9654 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9656 if (!tg3_flag(tp, 5750_PLUS) ||
9657 tg3_flag(tp, 5780_CLASS) ||
9658 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9659 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9660 tg3_flag(tp, 57765_PLUS))
9661 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9662 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9663 tg3_asic_rev(tp) == ASIC_REV_5787)
9664 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9665 else
9666 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9668 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9669 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9671 val = min(nic_rep_thresh, host_rep_thresh);
9672 tw32(RCVBDI_STD_THRESH, val);
9674 if (tg3_flag(tp, 57765_PLUS))
9675 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9677 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9678 return;
9680 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9682 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9684 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9685 tw32(RCVBDI_JUMBO_THRESH, val);
9687 if (tg3_flag(tp, 57765_PLUS))
9688 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9689 }
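/* Threshold math from above, worked through: with tp->rx_pending == 200
 * the host replenish threshold is max(200 / 8, 1) = 25 buffers, and the
 * value programmed into RCVBDI_STD_THRESH is the smaller of that and
 * the NIC-side min(bdcache_maxcnt / 2, tp->rx_std_max_post).
 */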
9691 static inline u32 calc_crc(unsigned char *buf, int len)
9692 {
9693 u32 reg;
9694 u32 tmp;
9695 int j, k;
9697 reg = 0xffffffff;
9699 for (j = 0; j < len; j++) {
9700 reg ^= buf[j];
9702 for (k = 0; k < 8; k++) {
9703 tmp = reg & 0x01;
9705 reg >>= 1;
9707 if (tmp)
9708 reg ^= 0xedb88320;
9709 }
9710 }
9712 return ~reg;
9713 }
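/* calc_crc() above is the bit-serial Ethernet CRC-32: reflected
 * polynomial 0xedb88320, initial value 0xffffffff, final inversion --
 * effectively ~crc32_le(~0, buf, len) from <linux/crc32.h>.  Only the
 * low-order bits of the result are consumed, by the multicast hashing
 * in __tg3_set_rx_mode() below.
 */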
9715 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9716 {
9717 /* accept or reject all multicast frames */
9718 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9719 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9720 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9721 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9722 }
9724 static void __tg3_set_rx_mode(struct net_device *dev)
9725 {
9726 struct tg3 *tp = netdev_priv(dev);
9727 u32 rx_mode;
9729 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9730 RX_MODE_KEEP_VLAN_TAG);
9732 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9733 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9734 * flag clear.
9735 */
9736 if (!tg3_flag(tp, ENABLE_ASF))
9737 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9738 #endif
9740 if (dev->flags & IFF_PROMISC) {
9741 /* Promiscuous mode. */
9742 rx_mode |= RX_MODE_PROMISC;
9743 } else if (dev->flags & IFF_ALLMULTI) {
9744 /* Accept all multicast. */
9745 tg3_set_multi(tp, 1);
9746 } else if (netdev_mc_empty(dev)) {
9747 /* Reject all multicast. */
9748 tg3_set_multi(tp, 0);
9749 } else {
9750 /* Accept one or more multicast(s). */
9751 struct netdev_hw_addr *ha;
9752 u32 mc_filter[4] = { 0, };
9753 u32 regidx;
9754 u32 bit;
9755 u32 crc;
9757 netdev_for_each_mc_addr(ha, dev) {
9758 crc = calc_crc(ha->addr, ETH_ALEN);
9759 bit = ~crc & 0x7f;
9760 regidx = (bit & 0x60) >> 5;
9761 bit &= 0x1f;
9762 mc_filter[regidx] |= (1 << bit);
9763 }
9765 tw32(MAC_HASH_REG_0, mc_filter[0]);
9766 tw32(MAC_HASH_REG_1, mc_filter[1]);
9767 tw32(MAC_HASH_REG_2, mc_filter[2]);
9768 tw32(MAC_HASH_REG_3, mc_filter[3]);
9769 }
9771 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9772 rx_mode |= RX_MODE_PROMISC;
9773 } else if (!(dev->flags & IFF_PROMISC)) {
9774 /* Add all entries to the mac addr filter list */
9775 int i = 0;
9776 struct netdev_hw_addr *ha;
9778 netdev_for_each_uc_addr(ha, dev) {
9779 __tg3_set_one_mac_addr(tp, ha->addr,
9780 i + TG3_UCAST_ADDR_IDX(tp));
9781 i++;
9782 }
9783 }
9785 if (rx_mode != tp->rx_mode) {
9786 tp->rx_mode = rx_mode;
9787 tw32_f(MAC_RX_MODE, rx_mode);
9788 udelay(10);
9789 }
9790 }
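/* Worked example of the hash above: if bit = ~crc & 0x7f comes out as
 * 0x47 (0b1000111), then regidx = (0x47 & 0x60) >> 5 = 2 and
 * bit &= 0x1f leaves 7, so the frame is matched by bit 7 of
 * MAC_HASH_REG_2 -- 128 hash bins spread over four 32-bit registers.
 */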
9792 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9793 {
9794 int i;
9796 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9797 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9798 }
9800 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9801 {
9802 int i;
9804 if (!tg3_flag(tp, SUPPORT_MSIX))
9805 return;
9807 if (tp->rxq_cnt == 1) {
9808 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9809 return;
9810 }
9812 /* Validate table against current IRQ count */
9813 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9814 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9815 break;
9816 }
9818 if (i != TG3_RSS_INDIR_TBL_SIZE)
9819 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9820 }
9822 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9823 {
9824 int i = 0;
9825 u32 reg = MAC_RSS_INDIR_TBL_0;
9827 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9828 u32 val = tp->rss_ind_tbl[i];
9829 i++;
9830 for (; i % 8; i++) {
9831 val <<= 4;
9832 val |= tp->rss_ind_tbl[i];
9833 }
9834 tw32(reg, val);
9835 reg += 4;
9836 }
9837 }
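/* Packing example for the loop above: eight 4-bit entries fill each
 * 32-bit register, first entry in the most significant nibble, so
 * rss_ind_tbl[0..7] = {1, 2, 3, 0, 1, 2, 3, 0} is written to
 * MAC_RSS_INDIR_TBL_0 as 0x12301230, and TG3_RSS_INDIR_TBL_SIZE / 8
 * registers cover the whole table.
 */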
9839 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9840 {
9841 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9842 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9843 else
9844 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9845 }
9847 /* tp->lock is held. */
9848 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9849 {
9850 u32 val, rdmac_mode;
9851 int i, err, limit;
9852 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9854 tg3_disable_ints(tp);
9856 tg3_stop_fw(tp);
9858 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9860 if (tg3_flag(tp, INIT_COMPLETE))
9861 tg3_abort_hw(tp, 1);
9863 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9864 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9865 tg3_phy_pull_config(tp);
9866 tg3_eee_pull_config(tp, NULL);
9867 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9868 }
9870 /* Enable MAC control of LPI */
9871 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9872 tg3_setup_eee(tp);
9874 if (reset_phy)
9875 tg3_phy_reset(tp);
9877 err = tg3_chip_reset(tp);
9878 if (err)
9879 return err;
9881 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9883 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9884 val = tr32(TG3_CPMU_CTRL);
9885 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9886 tw32(TG3_CPMU_CTRL, val);
9888 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9889 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9890 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9891 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9893 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9894 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9895 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9896 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9898 val = tr32(TG3_CPMU_HST_ACC);
9899 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9900 val |= CPMU_HST_ACC_MACCLK_6_25;
9901 tw32(TG3_CPMU_HST_ACC, val);
9902 }
9904 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9905 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9906 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9907 PCIE_PWR_MGMT_L1_THRESH_4MS;
9908 tw32(PCIE_PWR_MGMT_THRESH, val);
9910 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9911 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9913 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9915 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9916 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9917 }
9919 if (tg3_flag(tp, L1PLLPD_EN)) {
9920 u32 grc_mode = tr32(GRC_MODE);
9922 /* Access the lower 1K of PL PCIE block registers. */
9923 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9924 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9926 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9927 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9928 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9930 tw32(GRC_MODE, grc_mode);
9931 }
9933 if (tg3_flag(tp, 57765_CLASS)) {
9934 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9935 u32 grc_mode = tr32(GRC_MODE);
9937 /* Access the lower 1K of PL PCIE block registers. */
9938 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9939 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9941 val = tr32(TG3_PCIE_TLDLPL_PORT +
9942 TG3_PCIE_PL_LO_PHYCTL5);
9943 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9944 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9946 tw32(GRC_MODE, grc_mode);
9947 }
9949 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9950 u32 grc_mode;
9952 /* Fix transmit hangs */
9953 val = tr32(TG3_CPMU_PADRNG_CTL);
9954 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9955 tw32(TG3_CPMU_PADRNG_CTL, val);
9957 grc_mode = tr32(GRC_MODE);
9959 /* Access the lower 1K of DL PCIE block registers. */
9960 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9961 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9963 val = tr32(TG3_PCIE_TLDLPL_PORT +
9964 TG3_PCIE_DL_LO_FTSMAX);
9965 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9966 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9967 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9969 tw32(GRC_MODE, grc_mode);
9970 }
9972 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9973 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9974 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9975 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9976 }
9978 /* This works around an issue with Athlon chipsets on
9979 * B3 tigon3 silicon. This bit has no effect on any
9980 * other revision. But do not set this on PCI Express
9981 * chips and don't even touch the clocks if the CPMU is present.
9982 */
9983 if (!tg3_flag(tp, CPMU_PRESENT)) {
9984 if (!tg3_flag(tp, PCI_EXPRESS))
9985 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9986 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9987 }
9989 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9990 tg3_flag(tp, PCIX_MODE)) {
9991 val = tr32(TG3PCI_PCISTATE);
9992 val |= PCISTATE_RETRY_SAME_DMA;
9993 tw32(TG3PCI_PCISTATE, val);
9994 }
9996 if (tg3_flag(tp, ENABLE_APE)) {
9997 /* Allow reads and writes to the
9998 * APE register and memory space.
9999 */
10000 val = tr32(TG3PCI_PCISTATE);
10001 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10002 PCISTATE_ALLOW_APE_SHMEM_WR |
10003 PCISTATE_ALLOW_APE_PSPACE_WR;
10004 tw32(TG3PCI_PCISTATE, val);
10005 }
10007 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10008 /* Enable some hw fixes. */
10009 val = tr32(TG3PCI_MSI_DATA);
10010 val |= (1 << 26) | (1 << 28) | (1 << 29);
10011 tw32(TG3PCI_MSI_DATA, val);
10012 }
10014 /* Descriptor ring init may make accesses to the
10015 * NIC SRAM area to setup the TX descriptors, so we
10016 * can only do this after the hardware has been
10017 * successfully reset.
10018 */
10019 err = tg3_init_rings(tp);
10020 if (err)
10021 return err;
10023 if (tg3_flag(tp, 57765_PLUS)) {
10024 val = tr32(TG3PCI_DMA_RW_CTRL) &
10025 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10026 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10027 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10028 if (!tg3_flag(tp, 57765_CLASS) &&
10029 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10030 tg3_asic_rev(tp) != ASIC_REV_5762)
10031 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10032 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10033 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10034 tg3_asic_rev(tp) != ASIC_REV_5761) {
10035 /* This value is determined during the probe time DMA
10036 * engine test, tg3_test_dma.
10037 */
10038 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10039 }
10041 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10042 GRC_MODE_4X_NIC_SEND_RINGS |
10043 GRC_MODE_NO_TX_PHDR_CSUM |
10044 GRC_MODE_NO_RX_PHDR_CSUM);
10045 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10047 /* Pseudo-header checksum is done by hardware logic and not
10048 * the offload processors, so make the chip do the pseudo-
10049 * header checksums on receive. For transmit it is more
10050 * convenient to do the pseudo-header checksum in software
10051 * as Linux does that on transmit for us in all cases.
10052 */
10053 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10055 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10056 if (tp->rxptpctl)
10057 tw32(TG3_RX_PTP_CTL,
10058 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10060 if (tg3_flag(tp, PTP_CAPABLE))
10061 val |= GRC_MODE_TIME_SYNC_ENABLE;
10063 tw32(GRC_MODE, tp->grc_mode | val);
10065 /* On some AMD platforms, MRRS is restricted to 4000 because of
10066 * a south bridge limitation. As a workaround, the driver sets
10067 * MRRS to 2048 instead of the default 4096.
10068 */
10069 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10070 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10071 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10072 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10073 }
10075 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10076 val = tr32(GRC_MISC_CFG);
10077 val &= ~0xff;
10078 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10079 tw32(GRC_MISC_CFG, val);
10081 /* Initialize MBUF/DESC pool. */
10082 if (tg3_flag(tp, 5750_PLUS)) {
10083 /* Do nothing. */
10084 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10085 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10086 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10087 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10088 else
10089 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10090 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10091 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10092 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10093 int fw_len;
10095 fw_len = tp->fw_len;
10096 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10097 tw32(BUFMGR_MB_POOL_ADDR,
10098 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10099 tw32(BUFMGR_MB_POOL_SIZE,
10100 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10101 }
10103 if (tp->dev->mtu <= ETH_DATA_LEN) {
10104 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10105 tp->bufmgr_config.mbuf_read_dma_low_water);
10106 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10107 tp->bufmgr_config.mbuf_mac_rx_low_water);
10108 tw32(BUFMGR_MB_HIGH_WATER,
10109 tp->bufmgr_config.mbuf_high_water);
10110 } else {
10111 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10112 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10113 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10114 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10115 tw32(BUFMGR_MB_HIGH_WATER,
10116 tp->bufmgr_config.mbuf_high_water_jumbo);
10117 }
10118 tw32(BUFMGR_DMA_LOW_WATER,
10119 tp->bufmgr_config.dma_low_water);
10120 tw32(BUFMGR_DMA_HIGH_WATER,
10121 tp->bufmgr_config.dma_high_water);
10123 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10124 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10125 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10126 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10127 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10128 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10129 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10130 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10131 tw32(BUFMGR_MODE, val);
10132 for (i = 0; i < 2000; i++) {
10133 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10134 break;
10135 udelay(10);
10136 }
10137 if (i >= 2000) {
10138 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10139 return -ENODEV;
10140 }
10142 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10143 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10145 tg3_setup_rxbd_thresholds(tp);
10147 /* Initialize TG3_BDINFO's at:
10148 * RCVDBDI_STD_BD: standard eth size rx ring
10149 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10150 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10152 * like so:
10153 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10154 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10155 * ring attribute flags
10156 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10158 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10159 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10161 * The size of each ring is fixed in the firmware, but the location is
10162 * configurable.
10163 */
10164 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10165 ((u64) tpr->rx_std_mapping >> 32));
10166 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10167 ((u64) tpr->rx_std_mapping & 0xffffffff));
10168 if (!tg3_flag(tp, 5717_PLUS))
10169 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10170 NIC_SRAM_RX_BUFFER_DESC);
10172 /* Disable the mini ring */
10173 if (!tg3_flag(tp, 5705_PLUS))
10174 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10175 BDINFO_FLAGS_DISABLED);
10177 /* Program the jumbo buffer descriptor ring control
10178 * blocks on those devices that have them.
10179 */
10180 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10181 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10183 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10184 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10185 ((u64) tpr->rx_jmb_mapping >> 32));
10186 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10187 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10188 val = TG3_RX_JMB_RING_SIZE(tp) <<
10189 BDINFO_FLAGS_MAXLEN_SHIFT;
10190 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10191 val | BDINFO_FLAGS_USE_EXT_RECV);
10192 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10193 tg3_flag(tp, 57765_CLASS) ||
10194 tg3_asic_rev(tp) == ASIC_REV_5762)
10195 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10196 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10197 } else {
10198 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10199 BDINFO_FLAGS_DISABLED);
10200 }
10202 if (tg3_flag(tp, 57765_PLUS)) {
10203 val = TG3_RX_STD_RING_SIZE(tp);
10204 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10205 val |= (TG3_RX_STD_DMA_SZ << 2);
10206 } else
10207 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10208 } else
10209 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10211 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10213 tpr->rx_std_prod_idx = tp->rx_pending;
10214 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10216 tpr->rx_jmb_prod_idx =
10217 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10218 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10220 tg3_rings_reset(tp);
10222 /* Initialize MAC address and backoff seed. */
10223 __tg3_set_mac_addr(tp, false);
10225 /* MTU + ethernet header + FCS + optional VLAN tag */
10226 tw32(MAC_RX_MTU_SIZE,
10227 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
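/* Worked example for the MTU programming above: with the default
 * 1500-byte MTU the MAC accepts frames up to
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
 */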
10229 /* The slot time is changed by tg3_setup_phy if we
10230 * run at gigabit with half duplex.
10231 */
10232 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10233 (6 << TX_LENGTHS_IPG_SHIFT) |
10234 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10236 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10237 tg3_asic_rev(tp) == ASIC_REV_5762)
10238 val |= tr32(MAC_TX_LENGTHS) &
10239 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10240 TX_LENGTHS_CNT_DWN_VAL_MSK);
10242 tw32(MAC_TX_LENGTHS, val);
10244 /* Receive rules. */
10245 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10246 tw32(RCVLPC_CONFIG, 0x0181);
10248 /* Calculate RDMAC_MODE setting early; we need it to determine
10249 * the RCVLPC_STATE_ENABLE mask.
10250 */
10251 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10252 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10253 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10254 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10255 RDMAC_MODE_LNGREAD_ENAB);
10257 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10258 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10260 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10261 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10262 tg3_asic_rev(tp) == ASIC_REV_57780)
10263 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10264 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10265 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10267 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10268 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10269 if (tg3_flag(tp, TSO_CAPABLE) &&
10270 tg3_asic_rev(tp) == ASIC_REV_5705) {
10271 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10272 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10273 !tg3_flag(tp, IS_5788)) {
10274 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10275 }
10276 }
10278 if (tg3_flag(tp, PCI_EXPRESS))
10279 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10281 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10282 tp->dma_limit = 0;
10283 if (tp->dev->mtu <= ETH_DATA_LEN) {
10284 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10285 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10286 }
10287 }
10289 if (tg3_flag(tp, HW_TSO_1) ||
10290 tg3_flag(tp, HW_TSO_2) ||
10291 tg3_flag(tp, HW_TSO_3))
10292 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10294 if (tg3_flag(tp, 57765_PLUS) ||
10295 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10296 tg3_asic_rev(tp) == ASIC_REV_57780)
10297 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10299 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10300 tg3_asic_rev(tp) == ASIC_REV_5762)
10301 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10303 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10304 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10305 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10306 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10307 tg3_flag(tp, 57765_PLUS)) {
10308 u32 tgtreg;
10310 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10311 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10312 else
10313 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10315 val = tr32(tgtreg);
10316 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10317 tg3_asic_rev(tp) == ASIC_REV_5762) {
10318 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10319 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10320 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10321 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10322 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10323 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10324 }
10325 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10326 }
10328 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10329 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10330 tg3_asic_rev(tp) == ASIC_REV_5762) {
10331 u32 tgtreg;
10333 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10334 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10335 else
10336 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10338 val = tr32(tgtreg);
10339 tw32(tgtreg, val |
10340 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10341 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10342 }
10344 /* Receive/send statistics. */
10345 if (tg3_flag(tp, 5750_PLUS)) {
10346 val = tr32(RCVLPC_STATS_ENABLE);
10347 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10348 tw32(RCVLPC_STATS_ENABLE, val);
10349 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10350 tg3_flag(tp, TSO_CAPABLE)) {
10351 val = tr32(RCVLPC_STATS_ENABLE);
10352 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10353 tw32(RCVLPC_STATS_ENABLE, val);
10354 } else {
10355 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10356 }
10357 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10358 tw32(SNDDATAI_STATSENAB, 0xffffff);
10359 tw32(SNDDATAI_STATSCTRL,
10360 (SNDDATAI_SCTRL_ENABLE |
10361 SNDDATAI_SCTRL_FASTUPD));
10363 /* Setup host coalescing engine. */
10364 tw32(HOSTCC_MODE, 0);
10365 for (i = 0; i < 2000; i++) {
10366 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10367 break;
10368 udelay(10);
10369 }
10371 __tg3_set_coalesce(tp, &tp->coal);
10373 if (!tg3_flag(tp, 5705_PLUS)) {
10374 /* Status/statistics block address. See tg3_timer,
10375 * the tg3_periodic_fetch_stats call there, and
10376 * tg3_get_stats to see how this works for 5705/5750 chips.
10377 */
10378 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10379 ((u64) tp->stats_mapping >> 32));
10380 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10381 ((u64) tp->stats_mapping & 0xffffffff));
10382 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10384 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10386 /* Clear statistics and status block memory areas */
10387 for (i = NIC_SRAM_STATS_BLK;
10388 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10389 i += sizeof(u32)) {
10390 tg3_write_mem(tp, i, 0);
10391 udelay(40);
10392 }
10393 }
10395 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10397 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10398 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10399 if (!tg3_flag(tp, 5705_PLUS))
10400 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10402 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10403 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10404 /* reset to prevent losing 1st rx packet intermittently */
10405 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10406 udelay(10);
10407 }
10409 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10410 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10411 MAC_MODE_FHDE_ENABLE;
10412 if (tg3_flag(tp, ENABLE_APE))
10413 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10414 if (!tg3_flag(tp, 5705_PLUS) &&
10415 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10416 tg3_asic_rev(tp) != ASIC_REV_5700)
10417 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10418 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10419 udelay(40);
10421 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10422 * If TG3_FLAG_IS_NIC is zero, we should read the
10423 * register to preserve the GPIO settings for LOMs. The GPIOs,
10424 * whether used as inputs or outputs, are set by boot code after
10425 * reset.
10426 */
10427 if (!tg3_flag(tp, IS_NIC)) {
10428 u32 gpio_mask;
10430 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10431 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10432 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10434 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10435 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10436 GRC_LCLCTRL_GPIO_OUTPUT3;
10438 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10439 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10441 tp->grc_local_ctrl &= ~gpio_mask;
10442 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10444 /* GPIO1 must be driven high for eeprom write protect */
10445 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10446 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10447 GRC_LCLCTRL_GPIO_OUTPUT1);
10449 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10450 udelay(100);
10452 if (tg3_flag(tp, USING_MSIX)) {
10453 val = tr32(MSGINT_MODE);
10454 val |= MSGINT_MODE_ENABLE;
10455 if (tp->irq_cnt > 1)
10456 val |= MSGINT_MODE_MULTIVEC_EN;
10457 if (!tg3_flag(tp, 1SHOT_MSI))
10458 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10459 tw32(MSGINT_MODE, val);
10462 if (!tg3_flag(tp, 5705_PLUS)) {
10463 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10464 udelay(40);
10467 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10468 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10469 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10470 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10471 WDMAC_MODE_LNGREAD_ENAB);
10473 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10474 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10475 if (tg3_flag(tp, TSO_CAPABLE) &&
10476 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10477 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10478 /* nothing */
10479 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10480 !tg3_flag(tp, IS_5788)) {
10481 val |= WDMAC_MODE_RX_ACCEL;
10485 /* Enable host coalescing bug fix */
10486 if (tg3_flag(tp, 5755_PLUS))
10487 val |= WDMAC_MODE_STATUS_TAG_FIX;
10489 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10490 val |= WDMAC_MODE_BURST_ALL_DATA;
10492 tw32_f(WDMAC_MODE, val);
10493 udelay(40);
10495 if (tg3_flag(tp, PCIX_MODE)) {
10496 u16 pcix_cmd;
10498 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10499 &pcix_cmd);
10500 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10501 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10502 pcix_cmd |= PCI_X_CMD_READ_2K;
10503 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10504 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10505 pcix_cmd |= PCI_X_CMD_READ_2K;
10507 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10508 pcix_cmd);
10511 tw32_f(RDMAC_MODE, rdmac_mode);
10512 udelay(40);
10514 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10515 tg3_asic_rev(tp) == ASIC_REV_5720) {
10516 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10517 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10518 break;
10520 if (i < TG3_NUM_RDMA_CHANNELS) {
10521 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10522 val |= tg3_lso_rd_dma_workaround_bit(tp);
10523 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10524 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10528 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10529 if (!tg3_flag(tp, 5705_PLUS))
10530 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10532 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10533 tw32(SNDDATAC_MODE,
10534 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10535 else
10536 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10538 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10539 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10540 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10541 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10542 val |= RCVDBDI_MODE_LRG_RING_SZ;
10543 tw32(RCVDBDI_MODE, val);
10544 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10545 if (tg3_flag(tp, HW_TSO_1) ||
10546 tg3_flag(tp, HW_TSO_2) ||
10547 tg3_flag(tp, HW_TSO_3))
10548 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10549 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10550 if (tg3_flag(tp, ENABLE_TSS))
10551 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10552 tw32(SNDBDI_MODE, val);
10553 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10555 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10556 err = tg3_load_5701_a0_firmware_fix(tp);
10557 if (err)
10558 return err;
10561 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10562 /* Ignore any errors for the firmware download. If download
10563 * fails, the device will operate with EEE disabled. */
10565 tg3_load_57766_firmware(tp);
10568 if (tg3_flag(tp, TSO_CAPABLE)) {
10569 err = tg3_load_tso_firmware(tp);
10570 if (err)
10571 return err;
10574 tp->tx_mode = TX_MODE_ENABLE;
10576 if (tg3_flag(tp, 5755_PLUS) ||
10577 tg3_asic_rev(tp) == ASIC_REV_5906)
10578 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10580 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10581 tg3_asic_rev(tp) == ASIC_REV_5762) {
10582 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10583 tp->tx_mode &= ~val;
10584 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10587 tw32_f(MAC_TX_MODE, tp->tx_mode);
10588 udelay(100);
10590 if (tg3_flag(tp, ENABLE_RSS)) {
10591 u32 rss_key[10];
10593 tg3_rss_write_indir_tbl(tp);
10595 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10597 for (i = 0; i < 10 ; i++)
10598 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
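/* The 40-byte RSS hash key filled in by netdev_rss_key_fill() above
 * is programmed as ten consecutive 32-bit registers starting at
 * MAC_RSS_HASH_KEY_0.
 */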
10601 tp->rx_mode = RX_MODE_ENABLE;
10602 if (tg3_flag(tp, 5755_PLUS))
10603 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10605 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10606 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10608 if (tg3_flag(tp, ENABLE_RSS))
10609 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10610 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10611 RX_MODE_RSS_IPV6_HASH_EN |
10612 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10613 RX_MODE_RSS_IPV4_HASH_EN |
10614 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10616 tw32_f(MAC_RX_MODE, tp->rx_mode);
10617 udelay(10);
10619 tw32(MAC_LED_CTRL, tp->led_ctrl);
10621 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10622 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10623 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10624 udelay(10);
10626 tw32_f(MAC_RX_MODE, tp->rx_mode);
10627 udelay(10);
10629 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10630 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10631 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10632 /* Set drive transmission level to 1.2V */
10633 /* only if the signal pre-emphasis bit is not set */
10634 val = tr32(MAC_SERDES_CFG);
10635 val &= 0xfffff000;
10636 val |= 0x880;
10637 tw32(MAC_SERDES_CFG, val);
10639 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10640 tw32(MAC_SERDES_CFG, 0x616000);
10643 /* Prevent chip from dropping frames when flow control
10644 * is enabled. */
10646 if (tg3_flag(tp, 57765_CLASS))
10647 val = 1;
10648 else
10649 val = 2;
10650 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10652 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10653 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10654 /* Use hardware link auto-negotiation */
10655 tg3_flag_set(tp, HW_AUTONEG);
10658 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10659 tg3_asic_rev(tp) == ASIC_REV_5714) {
10660 u32 tmp;
10662 tmp = tr32(SERDES_RX_CTRL);
10663 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10664 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10665 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10666 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10669 if (!tg3_flag(tp, USE_PHYLIB)) {
10670 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10671 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10673 err = tg3_setup_phy(tp, false);
10674 if (err)
10675 return err;
10677 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10678 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10679 u32 tmp;
10681 /* Clear CRC stats. */
10682 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10683 tg3_writephy(tp, MII_TG3_TEST1,
10684 tmp | MII_TG3_TEST1_CRC_EN);
10685 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10690 __tg3_set_rx_mode(tp->dev);
10692 /* Initialize receive rules. */
10693 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10694 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10695 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10696 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10698 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10699 limit = 8;
10700 else
10701 limit = 16;
10702 if (tg3_flag(tp, ENABLE_ASF))
10703 limit -= 4;
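/* The switch below relies on deliberate fall-through: starting at
 * 'limit', every higher-numbered receive rule/value pair is cleared
 * (rules 2 and 3 are intentionally left alone). With ASF enabled the
 * top four rules are skipped, presumably reserved for the management
 * firmware.
 */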
10704 switch (limit) {
10705 case 16:
10706 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10707 case 15:
10708 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10709 case 14:
10710 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10711 case 13:
10712 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10713 case 12:
10714 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10715 case 11:
10716 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10717 case 10:
10718 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10719 case 9:
10720 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10721 case 8:
10722 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10723 case 7:
10724 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10725 case 6:
10726 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10727 case 5:
10728 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10729 case 4:
10730 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10731 case 3:
10732 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10733 case 2:
10734 case 1:
10736 default:
10737 break;
10740 if (tg3_flag(tp, ENABLE_APE))
10741 /* Write our heartbeat update interval to APE. */
10742 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10743 APE_HOST_HEARTBEAT_INT_5SEC);
10745 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10747 return 0;
10750 /* Called at device open time to get the chip ready for
10751 * packet processing. Invoked with tp->lock held. */
10753 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10755 /* Chip may have been just powered on. If so, the boot code may still
10756 * be running initialization. Wait for it to finish to avoid races in
10757 * accessing the hardware. */
10759 tg3_enable_register_access(tp);
10760 tg3_poll_fw(tp);
10762 tg3_switch_clocks(tp);
10764 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10766 return tg3_reset_hw(tp, reset_phy);
10769 #ifdef CONFIG_TIGON3_HWMON
10770 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10772 int i;
10774 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10775 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10777 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10778 off += len;
10780 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10781 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10782 memset(ocir, 0, TG3_OCIR_LEN);
10786 /* sysfs attributes for hwmon */
10787 static ssize_t tg3_show_temp(struct device *dev,
10788 struct device_attribute *devattr, char *buf)
10790 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10791 struct tg3 *tp = dev_get_drvdata(dev);
10792 u32 temperature;
10794 spin_lock_bh(&tp->lock);
10795 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10796 sizeof(temperature));
10797 spin_unlock_bh(&tp->lock);
10798 return sprintf(buf, "%u\n", temperature * 1000);
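/* hwmon reports temperatures in millidegrees Celsius, so the raw APE
 * scratchpad value is assumed to be in whole degrees, hence the
 * multiplication by 1000 above.
 */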
10802 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10803 TG3_TEMP_SENSOR_OFFSET);
10804 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10805 TG3_TEMP_CAUTION_OFFSET);
10806 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10807 TG3_TEMP_MAX_OFFSET);
10809 static struct attribute *tg3_attrs[] = {
10810 &sensor_dev_attr_temp1_input.dev_attr.attr,
10811 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10812 &sensor_dev_attr_temp1_max.dev_attr.attr,
10813 NULL
10815 ATTRIBUTE_GROUPS(tg3);
10817 static void tg3_hwmon_close(struct tg3 *tp)
10819 if (tp->hwmon_dev) {
10820 hwmon_device_unregister(tp->hwmon_dev);
10821 tp->hwmon_dev = NULL;
10825 static void tg3_hwmon_open(struct tg3 *tp)
10827 int i;
10828 u32 size = 0;
10829 struct pci_dev *pdev = tp->pdev;
10830 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10832 tg3_sd_scan_scratchpad(tp, ocirs);
10834 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10835 if (!ocirs[i].src_data_length)
10836 continue;
10838 size += ocirs[i].src_hdr_length;
10839 size += ocirs[i].src_data_length;
10842 if (!size)
10843 return;
10845 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10846 tp, tg3_groups);
10847 if (IS_ERR(tp->hwmon_dev)) {
10848 tp->hwmon_dev = NULL;
10849 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10852 #else
10853 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10854 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10855 #endif /* CONFIG_TIGON3_HWMON */
10858 #define TG3_STAT_ADD32(PSTAT, REG) \
10859 do { u32 __val = tr32(REG); \
10860 (PSTAT)->low += __val; \
10861 if ((PSTAT)->low < __val) \
10862 (PSTAT)->high += 1; \
10863 } while (0)
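/* TG3_STAT_ADD32 accumulates a 32-bit hardware counter into a 64-bit
 * software value: the (low < __val) test detects unsigned wrap of the
 * addition and carries into the high word. A consumer rebuilds the
 * full value as in get_stat64() further below, e.g. (illustrative):
 *
 *	u64 octets = ((u64)sp->tx_octets.high << 32) | sp->tx_octets.low;
 */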
10865 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10867 struct tg3_hw_stats *sp = tp->hw_stats;
10869 if (!tp->link_up)
10870 return;
10872 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10873 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10874 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10875 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10876 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10877 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10878 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10879 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10880 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10881 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10882 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10883 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10884 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
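/* Hedged reading of the check below: the 5719/5720 read-DMA
 * workaround enabled in tg3_reset_hw() is only needed early on, so
 * once more TX packets than RDMA channels have been counted the
 * workaround bit is cleared again.
 */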
10885 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10886 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10887 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10888 u32 val;
10890 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10891 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10892 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10893 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10896 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10897 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10898 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10899 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10900 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10901 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10902 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10903 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10904 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10905 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10906 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10907 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10908 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10909 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10911 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10912 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10913 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10914 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10915 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10916 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10917 } else {
10918 u32 val = tr32(HOSTCC_FLOW_ATTN);
10919 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10920 if (val) {
10921 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10922 sp->rx_discards.low += val;
10923 if (sp->rx_discards.low < val)
10924 sp->rx_discards.high += 1;
10926 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10928 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
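/* tg3_chk_missed_msi() below is a watchdog for lost MSIs: if a NAPI
 * context has pending work but its rx/tx consumer indices have not
 * advanced since the previous timer tick, the handler is invoked by
 * hand; chk_msi_cnt grants one tick of grace first.
 */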
10931 static void tg3_chk_missed_msi(struct tg3 *tp)
10933 u32 i;
10935 for (i = 0; i < tp->irq_cnt; i++) {
10936 struct tg3_napi *tnapi = &tp->napi[i];
10938 if (tg3_has_work(tnapi)) {
10939 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10940 tnapi->last_tx_cons == tnapi->tx_cons) {
10941 if (tnapi->chk_msi_cnt < 1) {
10942 tnapi->chk_msi_cnt++;
10943 return;
10945 tg3_msi(0, tnapi);
10948 tnapi->chk_msi_cnt = 0;
10949 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10950 tnapi->last_tx_cons = tnapi->tx_cons;
10954 static void tg3_timer(struct timer_list *t)
10956 struct tg3 *tp = from_timer(tp, t, timer);
10958 spin_lock(&tp->lock);
10960 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10961 spin_unlock(&tp->lock);
10962 goto restart_timer;
10965 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10966 tg3_flag(tp, 57765_CLASS))
10967 tg3_chk_missed_msi(tp);
10969 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10970 /* BCM4785: Flush posted writes from GbE to host memory. */
10971 tr32(HOSTCC_MODE);
10974 if (!tg3_flag(tp, TAGGED_STATUS)) {
10975 /* All of this is needed because, when using non-tagged
10976 * IRQ status, the mailbox/status_block protocol the chip
10977 * uses with the CPU is race prone. */
10979 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10980 tw32(GRC_LOCAL_CTRL,
10981 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10982 } else {
10983 tw32(HOSTCC_MODE, tp->coalesce_mode |
10984 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10987 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10988 spin_unlock(&tp->lock);
10989 tg3_reset_task_schedule(tp);
10990 goto restart_timer;
10994 /* This part only runs once per second. */
10995 if (!--tp->timer_counter) {
10996 if (tg3_flag(tp, 5705_PLUS))
10997 tg3_periodic_fetch_stats(tp);
10999 if (tp->setlpicnt && !--tp->setlpicnt)
11000 tg3_phy_eee_enable(tp);
11002 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11003 u32 mac_stat;
11004 int phy_event;
11006 mac_stat = tr32(MAC_STATUS);
11008 phy_event = 0;
11009 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11010 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11011 phy_event = 1;
11012 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11013 phy_event = 1;
11015 if (phy_event)
11016 tg3_setup_phy(tp, false);
11017 } else if (tg3_flag(tp, POLL_SERDES)) {
11018 u32 mac_stat = tr32(MAC_STATUS);
11019 int need_setup = 0;
11021 if (tp->link_up &&
11022 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11023 need_setup = 1;
11025 if (!tp->link_up &&
11026 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11027 MAC_STATUS_SIGNAL_DET))) {
11028 need_setup = 1;
11030 if (need_setup) {
11031 if (!tp->serdes_counter) {
11032 tw32_f(MAC_MODE,
11033 (tp->mac_mode &
11034 ~MAC_MODE_PORT_MODE_MASK));
11035 udelay(40);
11036 tw32_f(MAC_MODE, tp->mac_mode);
11037 udelay(40);
11039 tg3_setup_phy(tp, false);
11041 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11042 tg3_flag(tp, 5780_CLASS)) {
11043 tg3_serdes_parallel_detect(tp);
11044 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11045 u32 cpmu = tr32(TG3_CPMU_STATUS);
11046 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11047 TG3_CPMU_STATUS_LINK_MASK);
11049 if (link_up != tp->link_up)
11050 tg3_setup_phy(tp, false);
11053 tp->timer_counter = tp->timer_multiplier;
11056 /* Heartbeat is only sent once every 2 seconds.
11058 * The heartbeat is to tell the ASF firmware that the host
11059 * driver is still alive. In the event that the OS crashes,
11060 * ASF needs to reset the hardware to free up the FIFO space
11061 * that may be filled with rx packets destined for the host.
11062 * If the FIFO is full, ASF will no longer function properly.
11064 * Unintended resets have been reported on real-time kernels
11065 * where the timer doesn't run on time. Netpoll will also have
11066 * the same problem.
11068 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11069 * to check the ring condition when the heartbeat is expiring
11070 * before doing the reset. This will prevent most unintended
11071 * resets. */
11073 if (!--tp->asf_counter) {
11074 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11075 tg3_wait_for_event_ack(tp);
11077 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11078 FWCMD_NICDRV_ALIVE3);
11079 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11080 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11081 TG3_FW_UPDATE_TIMEOUT_SEC);
11083 tg3_generate_fw_event(tp);
11085 tp->asf_counter = tp->asf_multiplier;
11088 /* Update the APE heartbeat every 5 seconds. */
11089 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11091 spin_unlock(&tp->lock);
11093 restart_timer:
11094 tp->timer.expires = jiffies + tp->timer_offset;
11095 add_timer(&tp->timer);
11098 static void tg3_timer_init(struct tg3 *tp)
11100 if (tg3_flag(tp, TAGGED_STATUS) &&
11101 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11102 !tg3_flag(tp, 57765_CLASS))
11103 tp->timer_offset = HZ;
11104 else
11105 tp->timer_offset = HZ / 10;
11107 BUG_ON(tp->timer_offset > HZ);
11109 tp->timer_multiplier = (HZ / tp->timer_offset);
11110 tp->asf_multiplier = (HZ / tp->timer_offset) *
11111 TG3_FW_UPDATE_FREQ_SEC;
11113 timer_setup(&tp->timer, tg3_timer, 0);
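/* Timer cadence, worked through (illustrative, assuming HZ=1000):
 * with tagged status on a non-5717/57765 chip, timer_offset = HZ, so
 * tg3_timer() fires once per second and timer_multiplier = 1;
 * otherwise timer_offset = HZ / 10 (100 ms) and timer_multiplier =
 * 10, so the once-per-second block in tg3_timer() runs on every
 * tenth tick. asf_multiplier stretches that further by
 * TG3_FW_UPDATE_FREQ_SEC for the ASF heartbeat.
 */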
11116 static void tg3_timer_start(struct tg3 *tp)
11118 tp->asf_counter = tp->asf_multiplier;
11119 tp->timer_counter = tp->timer_multiplier;
11121 tp->timer.expires = jiffies + tp->timer_offset;
11122 add_timer(&tp->timer);
11125 static void tg3_timer_stop(struct tg3 *tp)
11127 del_timer_sync(&tp->timer);
11130 /* Restart hardware after configuration changes, self-test, etc.
11131 * Invoked with tp->lock held. */
11133 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11134 __releases(tp->lock)
11135 __acquires(tp->lock)
11137 int err;
11139 err = tg3_init_hw(tp, reset_phy);
11140 if (err) {
11141 netdev_err(tp->dev,
11142 "Failed to re-initialize device, aborting\n");
11143 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11144 tg3_full_unlock(tp);
11145 tg3_timer_stop(tp);
11146 tp->irq_sync = 0;
11147 tg3_napi_enable(tp);
11148 dev_close(tp->dev);
11149 tg3_full_lock(tp, 0);
11151 return err;
11154 static void tg3_reset_task(struct work_struct *work)
11156 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11157 int err;
11159 rtnl_lock();
11160 tg3_full_lock(tp, 0);
11162 if (!netif_running(tp->dev)) {
11163 tg3_flag_clear(tp, RESET_TASK_PENDING);
11164 tg3_full_unlock(tp);
11165 rtnl_unlock();
11166 return;
11169 tg3_full_unlock(tp);
11171 tg3_phy_stop(tp);
11173 tg3_netif_stop(tp);
11175 tg3_full_lock(tp, 1);
11177 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11178 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11179 tp->write32_rx_mbox = tg3_write_flush_reg32;
11180 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11181 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11184 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11185 err = tg3_init_hw(tp, true);
11186 if (err)
11187 goto out;
11189 tg3_netif_start(tp);
11191 out:
11192 tg3_full_unlock(tp);
11194 if (!err)
11195 tg3_phy_start(tp);
11197 tg3_flag_clear(tp, RESET_TASK_PENDING);
11198 rtnl_unlock();
11201 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11203 irq_handler_t fn;
11204 unsigned long flags;
11205 char *name;
11206 struct tg3_napi *tnapi = &tp->napi[irq_num];
11208 if (tp->irq_cnt == 1)
11209 name = tp->dev->name;
11210 else {
11211 name = &tnapi->irq_lbl[0];
11212 if (tnapi->tx_buffers && tnapi->rx_rcb)
11213 snprintf(name, IFNAMSIZ,
11214 "%s-txrx-%d", tp->dev->name, irq_num);
11215 else if (tnapi->tx_buffers)
11216 snprintf(name, IFNAMSIZ,
11217 "%s-tx-%d", tp->dev->name, irq_num);
11218 else if (tnapi->rx_rcb)
11219 snprintf(name, IFNAMSIZ,
11220 "%s-rx-%d", tp->dev->name, irq_num);
11221 else
11222 snprintf(name, IFNAMSIZ,
11223 "%s-%d", tp->dev->name, irq_num);
11224 name[IFNAMSIZ-1] = 0;
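/* With multiple vectors this produces names such as "eth0-txrx-1" or
 * "eth0-rx-2" in /proc/interrupts ("eth0" being a hypothetical
 * device name).
 */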
11227 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11228 fn = tg3_msi;
11229 if (tg3_flag(tp, 1SHOT_MSI))
11230 fn = tg3_msi_1shot;
11231 flags = 0;
11232 } else {
11233 fn = tg3_interrupt;
11234 if (tg3_flag(tp, TAGGED_STATUS))
11235 fn = tg3_interrupt_tagged;
11236 flags = IRQF_SHARED;
11239 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11242 static int tg3_test_interrupt(struct tg3 *tp)
11244 struct tg3_napi *tnapi = &tp->napi[0];
11245 struct net_device *dev = tp->dev;
11246 int err, i, intr_ok = 0;
11247 u32 val;
11249 if (!netif_running(dev))
11250 return -ENODEV;
11252 tg3_disable_ints(tp);
11254 free_irq(tnapi->irq_vec, tnapi);
11257 /* Turn off MSI one shot mode. Otherwise this test has no
11258 * observable way to know whether the interrupt was delivered. */
11260 if (tg3_flag(tp, 57765_PLUS)) {
11261 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11262 tw32(MSGINT_MODE, val);
11265 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11266 IRQF_SHARED, dev->name, tnapi);
11267 if (err)
11268 return err;
11270 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11271 tg3_enable_ints(tp);
11273 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11274 tnapi->coal_now);
11276 for (i = 0; i < 5; i++) {
11277 u32 int_mbox, misc_host_ctrl;
11279 int_mbox = tr32_mailbox(tnapi->int_mbox);
11280 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11282 if ((int_mbox != 0) ||
11283 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11284 intr_ok = 1;
11285 break;
11288 if (tg3_flag(tp, 57765_PLUS) &&
11289 tnapi->hw_status->status_tag != tnapi->last_tag)
11290 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11292 msleep(10);
11295 tg3_disable_ints(tp);
11297 free_irq(tnapi->irq_vec, tnapi);
11299 err = tg3_request_irq(tp, 0);
11301 if (err)
11302 return err;
11304 if (intr_ok) {
11305 /* Reenable MSI one shot mode. */
11306 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11307 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11308 tw32(MSGINT_MODE, val);
11310 return 0;
11313 return -EIO;
11316 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11317 * successfully restored. */
11319 static int tg3_test_msi(struct tg3 *tp)
11321 int err;
11322 u16 pci_cmd;
11324 if (!tg3_flag(tp, USING_MSI))
11325 return 0;
11327 /* Turn off SERR reporting in case MSI terminates with Master
11328 * Abort. */
11330 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11331 pci_write_config_word(tp->pdev, PCI_COMMAND,
11332 pci_cmd & ~PCI_COMMAND_SERR);
11334 err = tg3_test_interrupt(tp);
11336 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11338 if (!err)
11339 return 0;
11341 /* other failures */
11342 if (err != -EIO)
11343 return err;
11345 /* MSI test failed, go back to INTx mode */
11346 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11347 "to INTx mode. Please report this failure to the PCI "
11348 "maintainer and include system chipset information\n");
11350 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11352 pci_disable_msi(tp->pdev);
11354 tg3_flag_clear(tp, USING_MSI);
11355 tp->napi[0].irq_vec = tp->pdev->irq;
11357 err = tg3_request_irq(tp, 0);
11358 if (err)
11359 return err;
11361 /* Need to reset the chip because the MSI cycle may have terminated
11362 * with Master Abort.
11364 tg3_full_lock(tp, 1);
11366 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11367 err = tg3_init_hw(tp, true);
11369 tg3_full_unlock(tp);
11371 if (err)
11372 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11374 return err;
11377 static int tg3_request_firmware(struct tg3 *tp)
11379 const struct tg3_firmware_hdr *fw_hdr;
11381 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11382 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11383 tp->fw_needed);
11384 return -ENOENT;
11387 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11389 /* Firmware blob starts with version numbers, followed by
11390 * start address and _full_ length including BSS sections
11391 * (which must be longer than the actual data, of course). */
11394 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11395 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11396 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11397 tp->fw_len, tp->fw_needed);
11398 release_firmware(tp->fw);
11399 tp->fw = NULL;
11400 return -EINVAL;
11403 /* We no longer need firmware; we have it. */
11404 tp->fw_needed = NULL;
11405 return 0;
11408 static u32 tg3_irq_count(struct tg3 *tp)
11410 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11412 if (irq_cnt > 1) {
11413 /* We want as many rx rings enabled as there are cpus.
11414 * In multiqueue MSI-X mode, the first MSI-X vector
11415 * only deals with link interrupts, etc, so we add
11416 * one to the number of vectors we are requesting. */
11418 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11421 return irq_cnt;
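/* Example (illustrative): with rxq_cnt = 4 and txq_cnt = 1 the max()
 * gives 4; since that is greater than 1, one extra vector is added
 * for the link-only vector 0, so 5 MSI-X vectors are requested,
 * subject to the tp->irq_max cap.
 */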
11424 static bool tg3_enable_msix(struct tg3 *tp)
11426 int i, rc;
11427 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11429 tp->txq_cnt = tp->txq_req;
11430 tp->rxq_cnt = tp->rxq_req;
11431 if (!tp->rxq_cnt)
11432 tp->rxq_cnt = netif_get_num_default_rss_queues();
11433 if (tp->rxq_cnt > tp->rxq_max)
11434 tp->rxq_cnt = tp->rxq_max;
11436 /* Disable multiple TX rings by default. Simple round-robin hardware
11437 * scheduling of the TX rings can cause starvation of rings with
11438 * small packets when other rings have TSO or jumbo packets. */
11440 if (!tp->txq_req)
11441 tp->txq_cnt = 1;
11443 tp->irq_cnt = tg3_irq_count(tp);
11445 for (i = 0; i < tp->irq_max; i++) {
11446 msix_ent[i].entry = i;
11447 msix_ent[i].vector = 0;
11450 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11451 if (rc < 0) {
11452 return false;
11453 } else if (rc < tp->irq_cnt) {
11454 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11455 tp->irq_cnt, rc);
11456 tp->irq_cnt = rc;
11457 tp->rxq_cnt = max(rc - 1, 1);
11458 if (tp->txq_cnt)
11459 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11462 for (i = 0; i < tp->irq_max; i++)
11463 tp->napi[i].irq_vec = msix_ent[i].vector;
11465 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11466 pci_disable_msix(tp->pdev);
11467 return false;
11470 if (tp->irq_cnt == 1)
11471 return true;
11473 tg3_flag_set(tp, ENABLE_RSS);
11475 if (tp->txq_cnt > 1)
11476 tg3_flag_set(tp, ENABLE_TSS);
11478 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11480 return true;
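/* tg3_ints_init() below walks a fallback ladder: multi-vector MSI-X
 * first, then single-vector MSI, then legacy shared INTx; the defcfg
 * path normalizes everything back to one TX and one RX queue when
 * only a single vector is available.
 */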
11483 static void tg3_ints_init(struct tg3 *tp)
11485 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11486 !tg3_flag(tp, TAGGED_STATUS)) {
11487 /* All MSI supporting chips should support tagged
11488 * status. Assert that this is the case. */
11490 netdev_warn(tp->dev,
11491 "MSI without TAGGED_STATUS? Not using MSI\n");
11492 goto defcfg;
11495 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11496 tg3_flag_set(tp, USING_MSIX);
11497 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11498 tg3_flag_set(tp, USING_MSI);
11500 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11501 u32 msi_mode = tr32(MSGINT_MODE);
11502 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11503 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11504 if (!tg3_flag(tp, 1SHOT_MSI))
11505 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11506 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11508 defcfg:
11509 if (!tg3_flag(tp, USING_MSIX)) {
11510 tp->irq_cnt = 1;
11511 tp->napi[0].irq_vec = tp->pdev->irq;
11514 if (tp->irq_cnt == 1) {
11515 tp->txq_cnt = 1;
11516 tp->rxq_cnt = 1;
11517 netif_set_real_num_tx_queues(tp->dev, 1);
11518 netif_set_real_num_rx_queues(tp->dev, 1);
11522 static void tg3_ints_fini(struct tg3 *tp)
11524 if (tg3_flag(tp, USING_MSIX))
11525 pci_disable_msix(tp->pdev);
11526 else if (tg3_flag(tp, USING_MSI))
11527 pci_disable_msi(tp->pdev);
11528 tg3_flag_clear(tp, USING_MSI);
11529 tg3_flag_clear(tp, USING_MSIX);
11530 tg3_flag_clear(tp, ENABLE_RSS);
11531 tg3_flag_clear(tp, ENABLE_TSS);
11534 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11535 bool init)
11537 struct net_device *dev = tp->dev;
11538 int i, err;
11541 /* Set up interrupts first so we know how
11542 * many NAPI resources to allocate. */
11544 tg3_ints_init(tp);
11546 tg3_rss_check_indir_tbl(tp);
11548 /* The placement of this call is tied
11549 * to the setup and use of Host TX descriptors. */
11551 err = tg3_alloc_consistent(tp);
11552 if (err)
11553 goto out_ints_fini;
11555 tg3_napi_init(tp);
11557 tg3_napi_enable(tp);
11559 for (i = 0; i < tp->irq_cnt; i++) {
11560 err = tg3_request_irq(tp, i);
11561 if (err) {
11562 for (i--; i >= 0; i--) {
11563 struct tg3_napi *tnapi = &tp->napi[i];
11565 free_irq(tnapi->irq_vec, tnapi);
11567 goto out_napi_fini;
11571 tg3_full_lock(tp, 0);
11573 if (init)
11574 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11576 err = tg3_init_hw(tp, reset_phy);
11577 if (err) {
11578 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11579 tg3_free_rings(tp);
11582 tg3_full_unlock(tp);
11584 if (err)
11585 goto out_free_irq;
11587 if (test_irq && tg3_flag(tp, USING_MSI)) {
11588 err = tg3_test_msi(tp);
11590 if (err) {
11591 tg3_full_lock(tp, 0);
11592 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11593 tg3_free_rings(tp);
11594 tg3_full_unlock(tp);
11596 goto out_napi_fini;
11599 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11600 u32 val = tr32(PCIE_TRANSACTION_CFG);
11602 tw32(PCIE_TRANSACTION_CFG,
11603 val | PCIE_TRANS_CFG_1SHOT_MSI);
11607 tg3_phy_start(tp);
11609 tg3_hwmon_open(tp);
11611 tg3_full_lock(tp, 0);
11613 tg3_timer_start(tp);
11614 tg3_flag_set(tp, INIT_COMPLETE);
11615 tg3_enable_ints(tp);
11617 tg3_ptp_resume(tp);
11619 tg3_full_unlock(tp);
11621 netif_tx_start_all_queues(dev);
11624 /* Reset the loopback feature if it was turned on while the device
11625 * was down, and make sure that it is installed properly now. */
11627 if (dev->features & NETIF_F_LOOPBACK)
11628 tg3_set_loopback(dev, dev->features);
11630 return 0;
11632 out_free_irq:
11633 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11634 struct tg3_napi *tnapi = &tp->napi[i];
11635 free_irq(tnapi->irq_vec, tnapi);
11638 out_napi_fini:
11639 tg3_napi_disable(tp);
11640 tg3_napi_fini(tp);
11641 tg3_free_consistent(tp);
11643 out_ints_fini:
11644 tg3_ints_fini(tp);
11646 return err;
11649 static void tg3_stop(struct tg3 *tp)
11651 int i;
11653 tg3_reset_task_cancel(tp);
11654 tg3_netif_stop(tp);
11656 tg3_timer_stop(tp);
11658 tg3_hwmon_close(tp);
11660 tg3_phy_stop(tp);
11662 tg3_full_lock(tp, 1);
11664 tg3_disable_ints(tp);
11666 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11667 tg3_free_rings(tp);
11668 tg3_flag_clear(tp, INIT_COMPLETE);
11670 tg3_full_unlock(tp);
11672 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11673 struct tg3_napi *tnapi = &tp->napi[i];
11674 free_irq(tnapi->irq_vec, tnapi);
11677 tg3_ints_fini(tp);
11679 tg3_napi_fini(tp);
11681 tg3_free_consistent(tp);
11684 static int tg3_open(struct net_device *dev)
11686 struct tg3 *tp = netdev_priv(dev);
11687 int err;
11689 if (tp->pcierr_recovery) {
11690 netdev_err(dev, "Failed to open device. PCI error recovery "
11691 "in progress\n");
11692 return -EAGAIN;
11695 if (tp->fw_needed) {
11696 err = tg3_request_firmware(tp);
11697 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11698 if (err) {
11699 netdev_warn(tp->dev, "EEE capability disabled\n");
11700 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11701 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11702 netdev_warn(tp->dev, "EEE capability restored\n");
11703 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11705 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11706 if (err)
11707 return err;
11708 } else if (err) {
11709 netdev_warn(tp->dev, "TSO capability disabled\n");
11710 tg3_flag_clear(tp, TSO_CAPABLE);
11711 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11712 netdev_notice(tp->dev, "TSO capability restored\n");
11713 tg3_flag_set(tp, TSO_CAPABLE);
11717 tg3_carrier_off(tp);
11719 err = tg3_power_up(tp);
11720 if (err)
11721 return err;
11723 tg3_full_lock(tp, 0);
11725 tg3_disable_ints(tp);
11726 tg3_flag_clear(tp, INIT_COMPLETE);
11728 tg3_full_unlock(tp);
11730 err = tg3_start(tp,
11731 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11732 true, true);
11733 if (err) {
11734 tg3_frob_aux_power(tp, false);
11735 pci_set_power_state(tp->pdev, PCI_D3hot);
11738 return err;
11741 static int tg3_close(struct net_device *dev)
11743 struct tg3 *tp = netdev_priv(dev);
11745 if (tp->pcierr_recovery) {
11746 netdev_err(dev, "Failed to close device. PCI error recovery "
11747 "in progress\n");
11748 return -EAGAIN;
11751 tg3_stop(tp);
11753 if (pci_device_is_present(tp->pdev)) {
11754 tg3_power_down_prepare(tp);
11756 tg3_carrier_off(tp);
11758 return 0;
11761 static inline u64 get_stat64(tg3_stat64_t *val)
11763 return ((u64)val->high << 32) | ((u64)val->low);
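/* Counterpart of TG3_STAT_ADD32: folds the carry-extended high/low
 * pair back into a plain u64 for the statistics paths below.
 */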
11766 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11768 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11770 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11771 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11772 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11773 u32 val;
11775 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11776 tg3_writephy(tp, MII_TG3_TEST1,
11777 val | MII_TG3_TEST1_CRC_EN);
11778 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11779 } else
11780 val = 0;
11782 tp->phy_crc_errors += val;
11784 return tp->phy_crc_errors;
11787 return get_stat64(&hw_stats->rx_fcs_errors);
11790 #define ESTAT_ADD(member) \
11791 estats->member = old_estats->member + \
11792 get_stat64(&hw_stats->member)
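/* ESTAT_ADD makes the ethtool statistics cumulative across chip
 * resets: estats_prev holds the totals saved before the last halt,
 * and the live hardware stats block is added on top.
 */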
11794 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11796 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11797 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11799 ESTAT_ADD(rx_octets);
11800 ESTAT_ADD(rx_fragments);
11801 ESTAT_ADD(rx_ucast_packets);
11802 ESTAT_ADD(rx_mcast_packets);
11803 ESTAT_ADD(rx_bcast_packets);
11804 ESTAT_ADD(rx_fcs_errors);
11805 ESTAT_ADD(rx_align_errors);
11806 ESTAT_ADD(rx_xon_pause_rcvd);
11807 ESTAT_ADD(rx_xoff_pause_rcvd);
11808 ESTAT_ADD(rx_mac_ctrl_rcvd);
11809 ESTAT_ADD(rx_xoff_entered);
11810 ESTAT_ADD(rx_frame_too_long_errors);
11811 ESTAT_ADD(rx_jabbers);
11812 ESTAT_ADD(rx_undersize_packets);
11813 ESTAT_ADD(rx_in_length_errors);
11814 ESTAT_ADD(rx_out_length_errors);
11815 ESTAT_ADD(rx_64_or_less_octet_packets);
11816 ESTAT_ADD(rx_65_to_127_octet_packets);
11817 ESTAT_ADD(rx_128_to_255_octet_packets);
11818 ESTAT_ADD(rx_256_to_511_octet_packets);
11819 ESTAT_ADD(rx_512_to_1023_octet_packets);
11820 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11821 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11822 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11823 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11824 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11826 ESTAT_ADD(tx_octets);
11827 ESTAT_ADD(tx_collisions);
11828 ESTAT_ADD(tx_xon_sent);
11829 ESTAT_ADD(tx_xoff_sent);
11830 ESTAT_ADD(tx_flow_control);
11831 ESTAT_ADD(tx_mac_errors);
11832 ESTAT_ADD(tx_single_collisions);
11833 ESTAT_ADD(tx_mult_collisions);
11834 ESTAT_ADD(tx_deferred);
11835 ESTAT_ADD(tx_excessive_collisions);
11836 ESTAT_ADD(tx_late_collisions);
11837 ESTAT_ADD(tx_collide_2times);
11838 ESTAT_ADD(tx_collide_3times);
11839 ESTAT_ADD(tx_collide_4times);
11840 ESTAT_ADD(tx_collide_5times);
11841 ESTAT_ADD(tx_collide_6times);
11842 ESTAT_ADD(tx_collide_7times);
11843 ESTAT_ADD(tx_collide_8times);
11844 ESTAT_ADD(tx_collide_9times);
11845 ESTAT_ADD(tx_collide_10times);
11846 ESTAT_ADD(tx_collide_11times);
11847 ESTAT_ADD(tx_collide_12times);
11848 ESTAT_ADD(tx_collide_13times);
11849 ESTAT_ADD(tx_collide_14times);
11850 ESTAT_ADD(tx_collide_15times);
11851 ESTAT_ADD(tx_ucast_packets);
11852 ESTAT_ADD(tx_mcast_packets);
11853 ESTAT_ADD(tx_bcast_packets);
11854 ESTAT_ADD(tx_carrier_sense_errors);
11855 ESTAT_ADD(tx_discards);
11856 ESTAT_ADD(tx_errors);
11858 ESTAT_ADD(dma_writeq_full);
11859 ESTAT_ADD(dma_write_prioq_full);
11860 ESTAT_ADD(rxbds_empty);
11861 ESTAT_ADD(rx_discards);
11862 ESTAT_ADD(rx_errors);
11863 ESTAT_ADD(rx_threshold_hit);
11865 ESTAT_ADD(dma_readq_full);
11866 ESTAT_ADD(dma_read_prioq_full);
11867 ESTAT_ADD(tx_comp_queue_full);
11869 ESTAT_ADD(ring_set_send_prod_index);
11870 ESTAT_ADD(ring_status_update);
11871 ESTAT_ADD(nic_irqs);
11872 ESTAT_ADD(nic_avoided_irqs);
11873 ESTAT_ADD(nic_tx_threshold_hit);
11875 ESTAT_ADD(mbuf_lwm_thresh_hit);
11878 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11880 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11881 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11883 stats->rx_packets = old_stats->rx_packets +
11884 get_stat64(&hw_stats->rx_ucast_packets) +
11885 get_stat64(&hw_stats->rx_mcast_packets) +
11886 get_stat64(&hw_stats->rx_bcast_packets);
11888 stats->tx_packets = old_stats->tx_packets +
11889 get_stat64(&hw_stats->tx_ucast_packets) +
11890 get_stat64(&hw_stats->tx_mcast_packets) +
11891 get_stat64(&hw_stats->tx_bcast_packets);
11893 stats->rx_bytes = old_stats->rx_bytes +
11894 get_stat64(&hw_stats->rx_octets);
11895 stats->tx_bytes = old_stats->tx_bytes +
11896 get_stat64(&hw_stats->tx_octets);
11898 stats->rx_errors = old_stats->rx_errors +
11899 get_stat64(&hw_stats->rx_errors);
11900 stats->tx_errors = old_stats->tx_errors +
11901 get_stat64(&hw_stats->tx_errors) +
11902 get_stat64(&hw_stats->tx_mac_errors) +
11903 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11904 get_stat64(&hw_stats->tx_discards);
11906 stats->multicast = old_stats->multicast +
11907 get_stat64(&hw_stats->rx_mcast_packets);
11908 stats->collisions = old_stats->collisions +
11909 get_stat64(&hw_stats->tx_collisions);
11911 stats->rx_length_errors = old_stats->rx_length_errors +
11912 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11913 get_stat64(&hw_stats->rx_undersize_packets);
11915 stats->rx_frame_errors = old_stats->rx_frame_errors +
11916 get_stat64(&hw_stats->rx_align_errors);
11917 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11918 get_stat64(&hw_stats->tx_discards);
11919 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11920 get_stat64(&hw_stats->tx_carrier_sense_errors);
11922 stats->rx_crc_errors = old_stats->rx_crc_errors +
11923 tg3_calc_crc_errors(tp);
11925 stats->rx_missed_errors = old_stats->rx_missed_errors +
11926 get_stat64(&hw_stats->rx_discards);
11928 stats->rx_dropped = tp->rx_dropped;
11929 stats->tx_dropped = tp->tx_dropped;
11932 static int tg3_get_regs_len(struct net_device *dev)
11934 return TG3_REG_BLK_SIZE;
11937 static void tg3_get_regs(struct net_device *dev,
11938 struct ethtool_regs *regs, void *_p)
11940 struct tg3 *tp = netdev_priv(dev);
11942 regs->version = 0;
11944 memset(_p, 0, TG3_REG_BLK_SIZE);
11946 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11947 return;
11949 tg3_full_lock(tp, 0);
11951 tg3_dump_legacy_regs(tp, (u32 *)_p);
11953 tg3_full_unlock(tp);
11956 static int tg3_get_eeprom_len(struct net_device *dev)
11958 struct tg3 *tp = netdev_priv(dev);
11960 return tp->nvram_size;
11963 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11965 struct tg3 *tp = netdev_priv(dev);
11966 int ret, cpmu_restore = 0;
11967 u8 *pd;
11968 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11969 __be32 val;
11971 if (tg3_flag(tp, NO_NVRAM))
11972 return -EINVAL;
11974 offset = eeprom->offset;
11975 len = eeprom->len;
11976 eeprom->len = 0;
11978 eeprom->magic = TG3_EEPROM_MAGIC;
11980 /* Override clock, link aware and link idle modes */
11981 if (tg3_flag(tp, CPMU_PRESENT)) {
11982 cpmu_val = tr32(TG3_CPMU_CTRL);
11983 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11984 CPMU_CTRL_LINK_IDLE_MODE)) {
11985 tw32(TG3_CPMU_CTRL, cpmu_val &
11986 ~(CPMU_CTRL_LINK_AWARE_MODE |
11987 CPMU_CTRL_LINK_IDLE_MODE));
11988 cpmu_restore = 1;
11991 tg3_override_clk(tp);
11993 if (offset & 3) {
11994 /* adjustments to start on required 4 byte boundary */
11995 b_offset = offset & 3;
11996 b_count = 4 - b_offset;
11997 if (b_count > len) {
11998 /* i.e. offset=1 len=2 */
11999 b_count = len;
12001 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12002 if (ret)
12003 goto eeprom_done;
12004 memcpy(data, ((char *)&val) + b_offset, b_count);
12005 len -= b_count;
12006 offset += b_count;
12007 eeprom->len += b_count;
12010 /* read bytes up to the last 4 byte boundary */
12011 pd = &data[eeprom->len];
12012 for (i = 0; i < (len - (len & 3)); i += 4) {
12013 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12014 if (ret) {
12015 if (i)
12016 i -= 4;
12017 eeprom->len += i;
12018 goto eeprom_done;
12020 memcpy(pd + i, &val, 4);
12021 if (need_resched()) {
12022 if (signal_pending(current)) {
12023 eeprom->len += i;
12024 ret = -EINTR;
12025 goto eeprom_done;
12027 cond_resched();
12030 eeprom->len += i;
12032 if (len & 3) {
12033 /* read last bytes not ending on 4 byte boundary */
12034 pd = &data[eeprom->len];
12035 b_count = len & 3;
12036 b_offset = offset + len - b_count;
12037 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12038 if (ret)
12039 goto eeprom_done;
12040 memcpy(pd, &val, b_count);
12041 eeprom->len += b_count;
12043 ret = 0;
12045 eeprom_done:
12046 /* Restore clock, link aware and link idle modes */
12047 tg3_restore_clk(tp);
12048 if (cpmu_restore)
12049 tw32(TG3_CPMU_CTRL, cpmu_val);
12051 return ret;
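/* tg3_set_eeprom() below widens unaligned writes to whole NVRAM words
 * with a read-modify-write: the words bounding an unaligned head
 * (b_offset) or tail (odd_len) are read first and merged into a
 * temporary buffer, so only complete 4-byte words are programmed.
 */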
12054 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12056 struct tg3 *tp = netdev_priv(dev);
12057 int ret;
12058 u32 offset, len, b_offset, odd_len;
12059 u8 *buf;
12060 __be32 start = 0, end;
12062 if (tg3_flag(tp, NO_NVRAM) ||
12063 eeprom->magic != TG3_EEPROM_MAGIC)
12064 return -EINVAL;
12066 offset = eeprom->offset;
12067 len = eeprom->len;
12069 if ((b_offset = (offset & 3))) {
12070 /* adjustments to start on required 4 byte boundary */
12071 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12072 if (ret)
12073 return ret;
12074 len += b_offset;
12075 offset &= ~3;
12076 if (len < 4)
12077 len = 4;
12080 odd_len = 0;
12081 if (len & 3) {
12082 /* adjustments to end on required 4 byte boundary */
12083 odd_len = 1;
12084 len = (len + 3) & ~3;
12085 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12086 if (ret)
12087 return ret;
12090 buf = data;
12091 if (b_offset || odd_len) {
12092 buf = kmalloc(len, GFP_KERNEL);
12093 if (!buf)
12094 return -ENOMEM;
12095 if (b_offset)
12096 memcpy(buf, &start, 4);
12097 if (odd_len)
12098 memcpy(buf+len-4, &end, 4);
12099 memcpy(buf + b_offset, data, eeprom->len);
12102 ret = tg3_nvram_write_block(tp, offset, len, buf);
12104 if (buf != data)
12105 kfree(buf);
12107 return ret;
12110 static int tg3_get_link_ksettings(struct net_device *dev,
12111 struct ethtool_link_ksettings *cmd)
12113 struct tg3 *tp = netdev_priv(dev);
12114 u32 supported, advertising;
12116 if (tg3_flag(tp, USE_PHYLIB)) {
12117 struct phy_device *phydev;
12118 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12119 return -EAGAIN;
12120 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12121 phy_ethtool_ksettings_get(phydev, cmd);
12123 return 0;
12126 supported = (SUPPORTED_Autoneg);
12128 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12129 supported |= (SUPPORTED_1000baseT_Half |
12130 SUPPORTED_1000baseT_Full);
12132 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12133 supported |= (SUPPORTED_100baseT_Half |
12134 SUPPORTED_100baseT_Full |
12135 SUPPORTED_10baseT_Half |
12136 SUPPORTED_10baseT_Full |
12137 SUPPORTED_TP);
12138 cmd->base.port = PORT_TP;
12139 } else {
12140 supported |= SUPPORTED_FIBRE;
12141 cmd->base.port = PORT_FIBRE;
12143 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12144 supported);
12146 advertising = tp->link_config.advertising;
12147 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12148 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12149 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12150 advertising |= ADVERTISED_Pause;
12151 } else {
12152 advertising |= ADVERTISED_Pause |
12153 ADVERTISED_Asym_Pause;
12155 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12156 advertising |= ADVERTISED_Asym_Pause;
12159 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12160 advertising);
12162 if (netif_running(dev) && tp->link_up) {
12163 cmd->base.speed = tp->link_config.active_speed;
12164 cmd->base.duplex = tp->link_config.active_duplex;
12165 ethtool_convert_legacy_u32_to_link_mode(
12166 cmd->link_modes.lp_advertising,
12167 tp->link_config.rmt_adv);
12169 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12170 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12171 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12172 else
12173 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12175 } else {
12176 cmd->base.speed = SPEED_UNKNOWN;
12177 cmd->base.duplex = DUPLEX_UNKNOWN;
12178 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12180 cmd->base.phy_address = tp->phy_addr;
12181 cmd->base.autoneg = tp->link_config.autoneg;
12182 return 0;
12185 static int tg3_set_link_ksettings(struct net_device *dev,
12186 const struct ethtool_link_ksettings *cmd)
12188 struct tg3 *tp = netdev_priv(dev);
12189 u32 speed = cmd->base.speed;
12190 u32 advertising;
12192 if (tg3_flag(tp, USE_PHYLIB)) {
12193 struct phy_device *phydev;
12194 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12195 return -EAGAIN;
12196 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12197 return phy_ethtool_ksettings_set(phydev, cmd);
12200 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12201 cmd->base.autoneg != AUTONEG_DISABLE)
12202 return -EINVAL;
12204 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12205 cmd->base.duplex != DUPLEX_FULL &&
12206 cmd->base.duplex != DUPLEX_HALF)
12207 return -EINVAL;
12209 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12210 cmd->link_modes.advertising);
12212 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12213 u32 mask = ADVERTISED_Autoneg |
12214 ADVERTISED_Pause |
12215 ADVERTISED_Asym_Pause;
12217 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12218 mask |= ADVERTISED_1000baseT_Half |
12219 ADVERTISED_1000baseT_Full;
12221 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12222 mask |= ADVERTISED_100baseT_Half |
12223 ADVERTISED_100baseT_Full |
12224 ADVERTISED_10baseT_Half |
12225 ADVERTISED_10baseT_Full |
12226 ADVERTISED_TP;
12227 else
12228 mask |= ADVERTISED_FIBRE;
12230 if (advertising & ~mask)
12231 return -EINVAL;
12233 mask &= (ADVERTISED_1000baseT_Half |
12234 ADVERTISED_1000baseT_Full |
12235 ADVERTISED_100baseT_Half |
12236 ADVERTISED_100baseT_Full |
12237 ADVERTISED_10baseT_Half |
12238 ADVERTISED_10baseT_Full);
12240 advertising &= mask;
12241 } else {
12242 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12243 if (speed != SPEED_1000)
12244 return -EINVAL;
12246 if (cmd->base.duplex != DUPLEX_FULL)
12247 return -EINVAL;
12248 } else {
12249 if (speed != SPEED_100 &&
12250 speed != SPEED_10)
12251 return -EINVAL;
12255 tg3_full_lock(tp, 0);
12257 tp->link_config.autoneg = cmd->base.autoneg;
12258 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12259 tp->link_config.advertising = (advertising |
12260 ADVERTISED_Autoneg);
12261 tp->link_config.speed = SPEED_UNKNOWN;
12262 tp->link_config.duplex = DUPLEX_UNKNOWN;
12263 } else {
12264 tp->link_config.advertising = 0;
12265 tp->link_config.speed = speed;
12266 tp->link_config.duplex = cmd->base.duplex;
12269 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12271 tg3_warn_mgmt_link_flap(tp);
12273 if (netif_running(dev))
12274 tg3_setup_phy(tp, true);
12276 tg3_full_unlock(tp);
12278 return 0;
12281 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12283 struct tg3 *tp = netdev_priv(dev);
12285 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12286 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12287 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12288 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12291 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12293 struct tg3 *tp = netdev_priv(dev);
12295 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12296 wol->supported = WAKE_MAGIC;
12297 else
12298 wol->supported = 0;
12299 wol->wolopts = 0;
12300 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12301 wol->wolopts = WAKE_MAGIC;
12302 memset(&wol->sopass, 0, sizeof(wol->sopass));
12305 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12307 struct tg3 *tp = netdev_priv(dev);
12308 struct device *dp = &tp->pdev->dev;
12310 if (wol->wolopts & ~WAKE_MAGIC)
12311 return -EINVAL;
12312 if ((wol->wolopts & WAKE_MAGIC) &&
12313 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12314 return -EINVAL;
12316 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12318 if (device_may_wakeup(dp))
12319 tg3_flag_set(tp, WOL_ENABLE);
12320 else
12321 tg3_flag_clear(tp, WOL_ENABLE);
12323 return 0;
12326 static u32 tg3_get_msglevel(struct net_device *dev)
12328 struct tg3 *tp = netdev_priv(dev);
12329 return tp->msg_enable;
12332 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12334 struct tg3 *tp = netdev_priv(dev);
12335 tp->msg_enable = value;
12338 static int tg3_nway_reset(struct net_device *dev)
12340 struct tg3 *tp = netdev_priv(dev);
12341 int r;
12343 if (!netif_running(dev))
12344 return -EAGAIN;
12346 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12347 return -EINVAL;
12349 tg3_warn_mgmt_link_flap(tp);
12351 if (tg3_flag(tp, USE_PHYLIB)) {
12352 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12353 return -EAGAIN;
12354 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12355 } else {
12356 u32 bmcr;
12358 spin_lock_bh(&tp->lock);
12359 r = -EINVAL;
12360 tg3_readphy(tp, MII_BMCR, &bmcr);
12361 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12362 ((bmcr & BMCR_ANENABLE) ||
12363 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12364 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12365 BMCR_ANENABLE);
12366 r = 0;
12368 spin_unlock_bh(&tp->lock);
12371 return r;
12374 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12376 struct tg3 *tp = netdev_priv(dev);
12378 ering->rx_max_pending = tp->rx_std_ring_mask;
12379 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12380 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12381 else
12382 ering->rx_jumbo_max_pending = 0;
12384 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12386 ering->rx_pending = tp->rx_pending;
12387 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12388 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12389 else
12390 ering->rx_jumbo_pending = 0;
12392 ering->tx_pending = tp->napi[0].tx_pending;
12395 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12397 struct tg3 *tp = netdev_priv(dev);
12398 int i, irq_sync = 0, err = 0;
12400 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12401 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12402 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12403 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12404 (tg3_flag(tp, TSO_BUG) &&
12405 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12406 return -EINVAL;
12408 if (netif_running(dev)) {
12409 tg3_phy_stop(tp);
12410 tg3_netif_stop(tp);
12411 irq_sync = 1;
12414 tg3_full_lock(tp, irq_sync);
12416 tp->rx_pending = ering->rx_pending;
12418 if (tg3_flag(tp, MAX_RXPEND_64) &&
12419 tp->rx_pending > 63)
12420 tp->rx_pending = 63;
12422 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12423 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12425 for (i = 0; i < tp->irq_max; i++)
12426 tp->napi[i].tx_pending = ering->tx_pending;
12428 if (netif_running(dev)) {
12429 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12430 err = tg3_restart_hw(tp, false);
12431 if (!err)
12432 tg3_netif_start(tp);
12435 tg3_full_unlock(tp);
12437 if (irq_sync && !err)
12438 tg3_phy_start(tp);
12440 return err;
12443 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12445 struct tg3 *tp = netdev_priv(dev);
12447 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12449 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12450 epause->rx_pause = 1;
12451 else
12452 epause->rx_pause = 0;
12454 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12455 epause->tx_pause = 1;
12456 else
12457 epause->tx_pause = 0;
12460 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12462 struct tg3 *tp = netdev_priv(dev);
12463 int err = 0;
12465 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12466 tg3_warn_mgmt_link_flap(tp);
12468 if (tg3_flag(tp, USE_PHYLIB)) {
12469 u32 newadv;
12470 struct phy_device *phydev;
12472 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12474 if (!(phydev->supported & SUPPORTED_Pause) ||
12475 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12476 (epause->rx_pause != epause->tx_pause)))
12477 return -EINVAL;
12479 tp->link_config.flowctrl = 0;
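/* The advertisement chosen below follows the usual 802.3 pause
 * encoding: symmetric pause advertises Pause alone, RX-only pause
 * advertises Pause | Asym_Pause, and TX-only advertises Asym_Pause.
 */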
12480 if (epause->rx_pause) {
12481 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12483 if (epause->tx_pause) {
12484 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12485 newadv = ADVERTISED_Pause;
12486 } else
12487 newadv = ADVERTISED_Pause |
12488 ADVERTISED_Asym_Pause;
12489 } else if (epause->tx_pause) {
12490 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12491 newadv = ADVERTISED_Asym_Pause;
12492 } else
12493 newadv = 0;
12495 if (epause->autoneg)
12496 tg3_flag_set(tp, PAUSE_AUTONEG);
12497 else
12498 tg3_flag_clear(tp, PAUSE_AUTONEG);
12500 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12501 u32 oldadv = phydev->advertising &
12502 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12503 if (oldadv != newadv) {
12504 phydev->advertising &=
12505 ~(ADVERTISED_Pause |
12506 ADVERTISED_Asym_Pause);
12507 phydev->advertising |= newadv;
12508 if (phydev->autoneg) {
12509 /*
12510 * Always renegotiate the link to
12511 * inform our link partner of our
12512 * flow control settings, even if the
12513 * flow control is forced. Let
12514 * tg3_adjust_link() do the final
12515 * flow control setup.
12516 */
12517 return phy_start_aneg(phydev);
12521 if (!epause->autoneg)
12522 tg3_setup_flow_control(tp, 0, 0);
12523 } else {
12524 tp->link_config.advertising &=
12525 ~(ADVERTISED_Pause |
12526 ADVERTISED_Asym_Pause);
12527 tp->link_config.advertising |= newadv;
12529 } else {
12530 int irq_sync = 0;
12532 if (netif_running(dev)) {
12533 tg3_netif_stop(tp);
12534 irq_sync = 1;
12537 tg3_full_lock(tp, irq_sync);
12539 if (epause->autoneg)
12540 tg3_flag_set(tp, PAUSE_AUTONEG);
12541 else
12542 tg3_flag_clear(tp, PAUSE_AUTONEG);
12543 if (epause->rx_pause)
12544 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12545 else
12546 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12547 if (epause->tx_pause)
12548 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12549 else
12550 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12552 if (netif_running(dev)) {
12553 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12554 err = tg3_restart_hw(tp, false);
12555 if (!err)
12556 tg3_netif_start(tp);
12559 tg3_full_unlock(tp);
12562 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12564 return err;
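/* Illustrative sketch, not part of the driver: the rx/tx pause to
 * autoneg-advertisement mapping applied above, reduced to a table.
 * ADV_PAUSE/ADV_ASYM are stand-in bit values, not the real
 * ADVERTISED_Pause/ADVERTISED_Asym_Pause constants.
 */
#if 0
#include <stdio.h>

#define ADV_PAUSE	0x1	/* stand-in for ADVERTISED_Pause */
#define ADV_ASYM	0x2	/* stand-in for ADVERTISED_Asym_Pause */

static unsigned int pause_to_adv(int rx, int tx)
{
	if (rx)		/* rx+tx -> Pause; rx only -> Pause|Asym */
		return tx ? ADV_PAUSE : (ADV_PAUSE | ADV_ASYM);
	return tx ? ADV_ASYM : 0;	/* tx only -> Asym; neither -> none */
}

int main(void)
{
	printf("rx+tx=%#x rx=%#x tx=%#x none=%#x\n",
	       pause_to_adv(1, 1), pause_to_adv(1, 0),
	       pause_to_adv(0, 1), pause_to_adv(0, 0));
	return 0;
}
#endif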
12567 static int tg3_get_sset_count(struct net_device *dev, int sset)
12569 switch (sset) {
12570 case ETH_SS_TEST:
12571 return TG3_NUM_TEST;
12572 case ETH_SS_STATS:
12573 return TG3_NUM_STATS;
12574 default:
12575 return -EOPNOTSUPP;
12579 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12580 u32 *rules __always_unused)
12582 struct tg3 *tp = netdev_priv(dev);
12584 if (!tg3_flag(tp, SUPPORT_MSIX))
12585 return -EOPNOTSUPP;
12587 switch (info->cmd) {
12588 case ETHTOOL_GRXRINGS:
12589 if (netif_running(tp->dev))
12590 info->data = tp->rxq_cnt;
12591 else {
12592 info->data = num_online_cpus();
12593 if (info->data > TG3_RSS_MAX_NUM_QS)
12594 info->data = TG3_RSS_MAX_NUM_QS;
12597 return 0;
12599 default:
12600 return -EOPNOTSUPP;
12604 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12606 u32 size = 0;
12607 struct tg3 *tp = netdev_priv(dev);
12609 if (tg3_flag(tp, SUPPORT_MSIX))
12610 size = TG3_RSS_INDIR_TBL_SIZE;
12612 return size;
12615 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12617 struct tg3 *tp = netdev_priv(dev);
12618 int i;
12620 if (hfunc)
12621 *hfunc = ETH_RSS_HASH_TOP;
12622 if (!indir)
12623 return 0;
12625 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12626 indir[i] = tp->rss_ind_tbl[i];
12628 return 0;
12631 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12632 const u8 hfunc)
12634 struct tg3 *tp = netdev_priv(dev);
12635 size_t i;
12637 /* We require at least one supported parameter to be changed and no
12638 * change in any of the unsupported parameters
12639 */
12640 if (key ||
12641 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12642 return -EOPNOTSUPP;
12644 if (!indir)
12645 return 0;
12647 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12648 tp->rss_ind_tbl[i] = indir[i];
12650 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12651 return 0;
12653 /* It is legal to write the indirection
12654 * table while the device is running.
12655 */
12656 tg3_full_lock(tp, 0);
12657 tg3_rss_write_indir_tbl(tp);
12658 tg3_full_unlock(tp);
12660 return 0;
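/* Illustrative sketch, not part of the driver: the round-robin fill
 * that RSS indirection tables such as rss_ind_tbl[] above conventionally
 * start from, spreading table slots evenly over the rx queues. The
 * table size and queue count below are example values.
 */
#if 0
#include <stdio.h>

#define TBL_SIZE 128	/* example, cf. TG3_RSS_INDIR_TBL_SIZE */

static void fill_indir(unsigned char *tbl, unsigned int nqueues)
{
	unsigned int i;

	for (i = 0; i < TBL_SIZE; i++)
		tbl[i] = i % nqueues;	/* hash bucket i -> rx queue */
}

int main(void)
{
	unsigned char tbl[TBL_SIZE];

	fill_indir(tbl, 4);
	printf("slot 0 -> q%u, slot 5 -> q%u\n", tbl[0], tbl[5]);
	return 0;
}
#endif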
12663 static void tg3_get_channels(struct net_device *dev,
12664 struct ethtool_channels *channel)
12666 struct tg3 *tp = netdev_priv(dev);
12667 u32 deflt_qs = netif_get_num_default_rss_queues();
12669 channel->max_rx = tp->rxq_max;
12670 channel->max_tx = tp->txq_max;
12672 if (netif_running(dev)) {
12673 channel->rx_count = tp->rxq_cnt;
12674 channel->tx_count = tp->txq_cnt;
12675 } else {
12676 if (tp->rxq_req)
12677 channel->rx_count = tp->rxq_req;
12678 else
12679 channel->rx_count = min(deflt_qs, tp->rxq_max);
12681 if (tp->txq_req)
12682 channel->tx_count = tp->txq_req;
12683 else
12684 channel->tx_count = min(deflt_qs, tp->txq_max);
12688 static int tg3_set_channels(struct net_device *dev,
12689 struct ethtool_channels *channel)
12691 struct tg3 *tp = netdev_priv(dev);
12693 if (!tg3_flag(tp, SUPPORT_MSIX))
12694 return -EOPNOTSUPP;
12696 if (channel->rx_count > tp->rxq_max ||
12697 channel->tx_count > tp->txq_max)
12698 return -EINVAL;
12700 tp->rxq_req = channel->rx_count;
12701 tp->txq_req = channel->tx_count;
12703 if (!netif_running(dev))
12704 return 0;
12706 tg3_stop(tp);
12708 tg3_carrier_off(tp);
12710 tg3_start(tp, true, false, false);
12712 return 0;
12715 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12717 switch (stringset) {
12718 case ETH_SS_STATS:
12719 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12720 break;
12721 case ETH_SS_TEST:
12722 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12723 break;
12724 default:
12725 WARN_ON(1); /* we need a WARN() */
12726 break;
12730 static int tg3_set_phys_id(struct net_device *dev,
12731 enum ethtool_phys_id_state state)
12733 struct tg3 *tp = netdev_priv(dev);
12735 if (!netif_running(tp->dev))
12736 return -EAGAIN;
12738 switch (state) {
12739 case ETHTOOL_ID_ACTIVE:
12740 return 1; /* cycle on/off once per second */
12742 case ETHTOOL_ID_ON:
12743 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12744 LED_CTRL_1000MBPS_ON |
12745 LED_CTRL_100MBPS_ON |
12746 LED_CTRL_10MBPS_ON |
12747 LED_CTRL_TRAFFIC_OVERRIDE |
12748 LED_CTRL_TRAFFIC_BLINK |
12749 LED_CTRL_TRAFFIC_LED);
12750 break;
12752 case ETHTOOL_ID_OFF:
12753 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12754 LED_CTRL_TRAFFIC_OVERRIDE);
12755 break;
12757 case ETHTOOL_ID_INACTIVE:
12758 tw32(MAC_LED_CTRL, tp->led_ctrl);
12759 break;
12762 return 0;
12765 static void tg3_get_ethtool_stats(struct net_device *dev,
12766 struct ethtool_stats *estats, u64 *tmp_stats)
12768 struct tg3 *tp = netdev_priv(dev);
12770 if (tp->hw_stats)
12771 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12772 else
12773 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12776 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12778 int i;
12779 __be32 *buf;
12780 u32 offset = 0, len = 0;
12781 u32 magic, val;
12783 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12784 return NULL;
12786 if (magic == TG3_EEPROM_MAGIC) {
12787 for (offset = TG3_NVM_DIR_START;
12788 offset < TG3_NVM_DIR_END;
12789 offset += TG3_NVM_DIRENT_SIZE) {
12790 if (tg3_nvram_read(tp, offset, &val))
12791 return NULL;
12793 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12794 TG3_NVM_DIRTYPE_EXTVPD)
12795 break;
12798 if (offset != TG3_NVM_DIR_END) {
12799 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12800 if (tg3_nvram_read(tp, offset + 4, &offset))
12801 return NULL;
12803 offset = tg3_nvram_logical_addr(tp, offset);
12807 if (!offset || !len) {
12808 offset = TG3_NVM_VPD_OFF;
12809 len = TG3_NVM_VPD_LEN;
12812 buf = kmalloc(len, GFP_KERNEL);
12813 if (buf == NULL)
12814 return NULL;
12816 if (magic == TG3_EEPROM_MAGIC) {
12817 for (i = 0; i < len; i += 4) {
12818 /* The data is in little-endian format in NVRAM.
12819 * Use the big-endian read routines to preserve
12820 * the byte order as it exists in NVRAM.
12821 */
12822 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12823 goto error;
12825 } else {
12826 u8 *ptr;
12827 ssize_t cnt;
12828 unsigned int pos = 0;
12830 ptr = (u8 *)&buf[0];
12831 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12832 cnt = pci_read_vpd(tp->pdev, pos,
12833 len - pos, ptr);
12834 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12835 cnt = 0;
12836 else if (cnt < 0)
12837 goto error;
12839 if (pos != len)
12840 goto error;
12843 *vpdlen = len;
12845 return buf;
12847 error:
12848 kfree(buf);
12849 return NULL;
12852 #define NVRAM_TEST_SIZE 0x100
12853 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12854 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12855 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12856 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12857 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12858 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12859 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12860 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12862 static int tg3_test_nvram(struct tg3 *tp)
12864 u32 csum, magic, len;
12865 __be32 *buf;
12866 int i, j, k, err = 0, size;
12868 if (tg3_flag(tp, NO_NVRAM))
12869 return 0;
12871 if (tg3_nvram_read(tp, 0, &magic) != 0)
12872 return -EIO;
12874 if (magic == TG3_EEPROM_MAGIC)
12875 size = NVRAM_TEST_SIZE;
12876 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12877 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12878 TG3_EEPROM_SB_FORMAT_1) {
12879 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12880 case TG3_EEPROM_SB_REVISION_0:
12881 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12882 break;
12883 case TG3_EEPROM_SB_REVISION_2:
12884 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12885 break;
12886 case TG3_EEPROM_SB_REVISION_3:
12887 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12888 break;
12889 case TG3_EEPROM_SB_REVISION_4:
12890 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12891 break;
12892 case TG3_EEPROM_SB_REVISION_5:
12893 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12894 break;
12895 case TG3_EEPROM_SB_REVISION_6:
12896 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12897 break;
12898 default:
12899 return -EIO;
12901 } else
12902 return 0;
12903 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12904 size = NVRAM_SELFBOOT_HW_SIZE;
12905 else
12906 return -EIO;
12908 buf = kmalloc(size, GFP_KERNEL);
12909 if (buf == NULL)
12910 return -ENOMEM;
12912 err = -EIO;
12913 for (i = 0, j = 0; i < size; i += 4, j++) {
12914 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12915 if (err)
12916 break;
12918 if (i < size)
12919 goto out;
12921 /* Selfboot format */
12922 magic = be32_to_cpu(buf[0]);
12923 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12924 TG3_EEPROM_MAGIC_FW) {
12925 u8 *buf8 = (u8 *) buf, csum8 = 0;
12927 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12928 TG3_EEPROM_SB_REVISION_2) {
12929 /* For rev 2, the csum doesn't include the MBA. */
12930 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12931 csum8 += buf8[i];
12932 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12933 csum8 += buf8[i];
12934 } else {
12935 for (i = 0; i < size; i++)
12936 csum8 += buf8[i];
12939 if (csum8 == 0) {
12940 err = 0;
12941 goto out;
12944 err = -EIO;
12945 goto out;
12948 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12949 TG3_EEPROM_MAGIC_HW) {
12950 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12951 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12952 u8 *buf8 = (u8 *) buf;
12954 /* Separate the parity bits and the data bytes. */
12955 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12956 if ((i == 0) || (i == 8)) {
12957 int l;
12958 u8 msk;
12960 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12961 parity[k++] = buf8[i] & msk;
12962 i++;
12963 } else if (i == 16) {
12964 int l;
12965 u8 msk;
12967 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12968 parity[k++] = buf8[i] & msk;
12969 i++;
12971 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12972 parity[k++] = buf8[i] & msk;
12973 i++;
12975 data[j++] = buf8[i];
12978 err = -EIO;
12979 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12980 u8 hw8 = hweight8(data[i]);
12982 if ((hw8 & 0x1) && parity[i])
12983 goto out;
12984 else if (!(hw8 & 0x1) && !parity[i])
12985 goto out;
12987 err = 0;
12988 goto out;
12991 err = -EIO;
12993 /* Bootstrap checksum at offset 0x10 */
12994 csum = calc_crc((unsigned char *) buf, 0x10);
12995 if (csum != le32_to_cpu(buf[0x10/4]))
12996 goto out;
12998 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12999 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13000 if (csum != le32_to_cpu(buf[0xfc/4]))
13001 goto out;
13003 kfree(buf);
13005 buf = tg3_vpd_readblock(tp, &len);
13006 if (!buf)
13007 return -ENOMEM;
13009 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13010 if (i > 0) {
13011 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13012 if (j < 0)
13013 goto out;
13015 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13016 goto out;
13018 i += PCI_VPD_LRDT_TAG_SIZE;
13019 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13020 PCI_VPD_RO_KEYWORD_CHKSUM);
13021 if (j > 0) {
13022 u8 csum8 = 0;
13024 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13026 for (i = 0; i <= j; i++)
13027 csum8 += ((u8 *)buf)[i];
13029 if (csum8)
13030 goto out;
13034 err = 0;
13036 out:
13037 kfree(buf);
13038 return err;
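/* Illustrative sketch, not part of the driver: the odd-parity rule the
 * selfboot-HW check above enforces. A stored parity bit must be set
 * exactly when its data byte has an even number of one bits, so byte
 * plus parity always carry odd weight.
 */
#if 0
#include <stdio.h>

static unsigned int hweight8(unsigned char b)
{
	unsigned int n = 0;

	while (b) {		/* count set bits */
		n += b & 1;
		b >>= 1;
	}
	return n;
}

static int parity_ok(unsigned char data, int parity_bit)
{
	return (hweight8(data) + (parity_bit ? 1 : 0)) & 1;
}

int main(void)
{
	/* 0x03 has even weight: its parity bit must be set to pass. */
	printf("%d %d\n", parity_ok(0x03, 1), parity_ok(0x03, 0));
	return 0;
}
#endif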
13041 #define TG3_SERDES_TIMEOUT_SEC 2
13042 #define TG3_COPPER_TIMEOUT_SEC 6
13044 static int tg3_test_link(struct tg3 *tp)
13046 int i, max;
13048 if (!netif_running(tp->dev))
13049 return -ENODEV;
13051 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13052 max = TG3_SERDES_TIMEOUT_SEC;
13053 else
13054 max = TG3_COPPER_TIMEOUT_SEC;
13056 for (i = 0; i < max; i++) {
13057 if (tp->link_up)
13058 return 0;
13060 if (msleep_interruptible(1000))
13061 break;
13064 return -EIO;
13067 /* Only test the commonly used registers */
13068 static int tg3_test_registers(struct tg3 *tp)
13070 int i, is_5705, is_5750;
13071 u32 offset, read_mask, write_mask, val, save_val, read_val;
13072 static struct {
13073 u16 offset;
13074 u16 flags;
13075 #define TG3_FL_5705 0x1
13076 #define TG3_FL_NOT_5705 0x2
13077 #define TG3_FL_NOT_5788 0x4
13078 #define TG3_FL_NOT_5750 0x8
13079 u32 read_mask;
13080 u32 write_mask;
13081 } reg_tbl[] = {
13082 /* MAC Control Registers */
13083 { MAC_MODE, TG3_FL_NOT_5705,
13084 0x00000000, 0x00ef6f8c },
13085 { MAC_MODE, TG3_FL_5705,
13086 0x00000000, 0x01ef6b8c },
13087 { MAC_STATUS, TG3_FL_NOT_5705,
13088 0x03800107, 0x00000000 },
13089 { MAC_STATUS, TG3_FL_5705,
13090 0x03800100, 0x00000000 },
13091 { MAC_ADDR_0_HIGH, 0x0000,
13092 0x00000000, 0x0000ffff },
13093 { MAC_ADDR_0_LOW, 0x0000,
13094 0x00000000, 0xffffffff },
13095 { MAC_RX_MTU_SIZE, 0x0000,
13096 0x00000000, 0x0000ffff },
13097 { MAC_TX_MODE, 0x0000,
13098 0x00000000, 0x00000070 },
13099 { MAC_TX_LENGTHS, 0x0000,
13100 0x00000000, 0x00003fff },
13101 { MAC_RX_MODE, TG3_FL_NOT_5705,
13102 0x00000000, 0x000007fc },
13103 { MAC_RX_MODE, TG3_FL_5705,
13104 0x00000000, 0x000007dc },
13105 { MAC_HASH_REG_0, 0x0000,
13106 0x00000000, 0xffffffff },
13107 { MAC_HASH_REG_1, 0x0000,
13108 0x00000000, 0xffffffff },
13109 { MAC_HASH_REG_2, 0x0000,
13110 0x00000000, 0xffffffff },
13111 { MAC_HASH_REG_3, 0x0000,
13112 0x00000000, 0xffffffff },
13114 /* Receive Data and Receive BD Initiator Control Registers. */
13115 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13116 0x00000000, 0xffffffff },
13117 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13118 0x00000000, 0xffffffff },
13119 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13120 0x00000000, 0x00000003 },
13121 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13122 0x00000000, 0xffffffff },
13123 { RCVDBDI_STD_BD+0, 0x0000,
13124 0x00000000, 0xffffffff },
13125 { RCVDBDI_STD_BD+4, 0x0000,
13126 0x00000000, 0xffffffff },
13127 { RCVDBDI_STD_BD+8, 0x0000,
13128 0x00000000, 0xffff0002 },
13129 { RCVDBDI_STD_BD+0xc, 0x0000,
13130 0x00000000, 0xffffffff },
13132 /* Receive BD Initiator Control Registers. */
13133 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13134 0x00000000, 0xffffffff },
13135 { RCVBDI_STD_THRESH, TG3_FL_5705,
13136 0x00000000, 0x000003ff },
13137 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13138 0x00000000, 0xffffffff },
13140 /* Host Coalescing Control Registers. */
13141 { HOSTCC_MODE, TG3_FL_NOT_5705,
13142 0x00000000, 0x00000004 },
13143 { HOSTCC_MODE, TG3_FL_5705,
13144 0x00000000, 0x000000f6 },
13145 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13146 0x00000000, 0xffffffff },
13147 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13148 0x00000000, 0x000003ff },
13149 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13150 0x00000000, 0xffffffff },
13151 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13152 0x00000000, 0x000003ff },
13153 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13154 0x00000000, 0xffffffff },
13155 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13156 0x00000000, 0x000000ff },
13157 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13158 0x00000000, 0xffffffff },
13159 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13160 0x00000000, 0x000000ff },
13161 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13162 0x00000000, 0xffffffff },
13163 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13164 0x00000000, 0xffffffff },
13165 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13166 0x00000000, 0xffffffff },
13167 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13168 0x00000000, 0x000000ff },
13169 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13170 0x00000000, 0xffffffff },
13171 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13172 0x00000000, 0x000000ff },
13173 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13174 0x00000000, 0xffffffff },
13175 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13176 0x00000000, 0xffffffff },
13177 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13178 0x00000000, 0xffffffff },
13179 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13180 0x00000000, 0xffffffff },
13181 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13182 0x00000000, 0xffffffff },
13183 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13184 0xffffffff, 0x00000000 },
13185 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13186 0xffffffff, 0x00000000 },
13188 /* Buffer Manager Control Registers. */
13189 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13190 0x00000000, 0x007fff80 },
13191 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13192 0x00000000, 0x007fffff },
13193 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13194 0x00000000, 0x0000003f },
13195 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13196 0x00000000, 0x000001ff },
13197 { BUFMGR_MB_HIGH_WATER, 0x0000,
13198 0x00000000, 0x000001ff },
13199 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13200 0xffffffff, 0x00000000 },
13201 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13202 0xffffffff, 0x00000000 },
13204 /* Mailbox Registers */
13205 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13206 0x00000000, 0x000001ff },
13207 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13208 0x00000000, 0x000001ff },
13209 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13210 0x00000000, 0x000007ff },
13211 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13212 0x00000000, 0x000001ff },
13214 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13217 is_5705 = is_5750 = 0;
13218 if (tg3_flag(tp, 5705_PLUS)) {
13219 is_5705 = 1;
13220 if (tg3_flag(tp, 5750_PLUS))
13221 is_5750 = 1;
13224 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13225 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13226 continue;
13228 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13229 continue;
13231 if (tg3_flag(tp, IS_5788) &&
13232 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13233 continue;
13235 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13236 continue;
13238 offset = (u32) reg_tbl[i].offset;
13239 read_mask = reg_tbl[i].read_mask;
13240 write_mask = reg_tbl[i].write_mask;
13242 /* Save the original register content */
13243 save_val = tr32(offset);
13245 /* Determine the read-only value. */
13246 read_val = save_val & read_mask;
13248 /* Write zero to the register, then make sure the read-only bits
13249 * are not changed and the read/write bits are all zeros.
13250 */
13251 tw32(offset, 0);
13253 val = tr32(offset);
13255 /* Test the read-only and read/write bits. */
13256 if (((val & read_mask) != read_val) || (val & write_mask))
13257 goto out;
13259 /* Write ones to all the bits defined by RdMask and WrMask, then
13260 * make sure the read-only bits are not changed and the
13261 * read/write bits are all ones.
13262 */
13263 tw32(offset, read_mask | write_mask);
13265 val = tr32(offset);
13267 /* Test the read-only bits. */
13268 if ((val & read_mask) != read_val)
13269 goto out;
13271 /* Test the read/write bits. */
13272 if ((val & write_mask) != write_mask)
13273 goto out;
13275 tw32(offset, save_val);
13278 return 0;
13280 out:
13281 if (netif_msg_hw(tp))
13282 netdev_err(tp->dev,
13283 "Register test failed at offset %x\n", offset);
13284 tw32(offset, save_val);
13285 return -EIO;
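/* Illustrative sketch, not part of the driver: the read-mask/write-mask
 * probe tg3_test_registers() applies per register, reduced to a plain
 * function over one fake register. reg_read()/reg_write() stand in for
 * tr32()/tw32().
 */
#if 0
#include <stdio.h>

static unsigned int fake_reg = 0x12345678;

static unsigned int reg_read(void) { return fake_reg; }
static void reg_write(unsigned int v) { fake_reg = v; }

/* 0 if the read_mask bits are read-only and the write_mask bits read
 * back as written, after probing with all-zeros and all-ones patterns.
 */
static int probe_reg(unsigned int read_mask, unsigned int write_mask)
{
	unsigned int save = reg_read();
	unsigned int ro = save & read_mask;
	unsigned int val;
	int ret = -1;

	reg_write(0);
	val = reg_read();
	if (((val & read_mask) != ro) || (val & write_mask))
		goto out;

	reg_write(read_mask | write_mask);
	val = reg_read();
	if (((val & read_mask) != ro) || ((val & write_mask) != write_mask))
		goto out;

	ret = 0;
out:
	reg_write(save);
	return ret;
}

int main(void)
{
	/* A plain memory cell behaves as fully read/write. */
	printf("%d\n", probe_reg(0x00000000, 0xffffffff));
	return 0;
}
#endif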
13288 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13290 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13291 int i;
13292 u32 j;
13294 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13295 for (j = 0; j < len; j += 4) {
13296 u32 val;
13298 tg3_write_mem(tp, offset + j, test_pattern[i]);
13299 tg3_read_mem(tp, offset + j, &val);
13300 if (val != test_pattern[i])
13301 return -EIO;
13304 return 0;
13307 static int tg3_test_memory(struct tg3 *tp)
13309 static struct mem_entry {
13310 u32 offset;
13311 u32 len;
13312 } mem_tbl_570x[] = {
13313 { 0x00000000, 0x00b50},
13314 { 0x00002000, 0x1c000},
13315 { 0xffffffff, 0x00000}
13316 }, mem_tbl_5705[] = {
13317 { 0x00000100, 0x0000c},
13318 { 0x00000200, 0x00008},
13319 { 0x00004000, 0x00800},
13320 { 0x00006000, 0x01000},
13321 { 0x00008000, 0x02000},
13322 { 0x00010000, 0x0e000},
13323 { 0xffffffff, 0x00000}
13324 }, mem_tbl_5755[] = {
13325 { 0x00000200, 0x00008},
13326 { 0x00004000, 0x00800},
13327 { 0x00006000, 0x00800},
13328 { 0x00008000, 0x02000},
13329 { 0x00010000, 0x0c000},
13330 { 0xffffffff, 0x00000}
13331 }, mem_tbl_5906[] = {
13332 { 0x00000200, 0x00008},
13333 { 0x00004000, 0x00400},
13334 { 0x00006000, 0x00400},
13335 { 0x00008000, 0x01000},
13336 { 0x00010000, 0x01000},
13337 { 0xffffffff, 0x00000}
13338 }, mem_tbl_5717[] = {
13339 { 0x00000200, 0x00008},
13340 { 0x00010000, 0x0a000},
13341 { 0x00020000, 0x13c00},
13342 { 0xffffffff, 0x00000}
13343 }, mem_tbl_57765[] = {
13344 { 0x00000200, 0x00008},
13345 { 0x00004000, 0x00800},
13346 { 0x00006000, 0x09800},
13347 { 0x00010000, 0x0a000},
13348 { 0xffffffff, 0x00000}
13350 struct mem_entry *mem_tbl;
13351 int err = 0;
13352 int i;
13354 if (tg3_flag(tp, 5717_PLUS))
13355 mem_tbl = mem_tbl_5717;
13356 else if (tg3_flag(tp, 57765_CLASS) ||
13357 tg3_asic_rev(tp) == ASIC_REV_5762)
13358 mem_tbl = mem_tbl_57765;
13359 else if (tg3_flag(tp, 5755_PLUS))
13360 mem_tbl = mem_tbl_5755;
13361 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13362 mem_tbl = mem_tbl_5906;
13363 else if (tg3_flag(tp, 5705_PLUS))
13364 mem_tbl = mem_tbl_5705;
13365 else
13366 mem_tbl = mem_tbl_570x;
13368 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13369 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13370 if (err)
13371 break;
13374 return err;
13377 #define TG3_TSO_MSS 500
13379 #define TG3_TSO_IP_HDR_LEN 20
13380 #define TG3_TSO_TCP_HDR_LEN 20
13381 #define TG3_TSO_TCP_OPT_LEN 12
13383 static const u8 tg3_tso_header[] = {
13384 0x08, 0x00,
13385 0x45, 0x00, 0x00, 0x00,
13386 0x00, 0x00, 0x40, 0x00,
13387 0x40, 0x06, 0x00, 0x00,
13388 0x0a, 0x00, 0x00, 0x01,
13389 0x0a, 0x00, 0x00, 0x02,
13390 0x0d, 0x00, 0xe0, 0x00,
13391 0x00, 0x00, 0x01, 0x00,
13392 0x00, 0x00, 0x02, 0x00,
13393 0x80, 0x10, 0x10, 0x00,
13394 0x14, 0x09, 0x00, 0x00,
13395 0x01, 0x01, 0x08, 0x0a,
13396 0x11, 0x11, 0x11, 0x11,
13397 0x11, 0x11, 0x11, 0x11,
13400 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13402 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13403 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13404 u32 budget;
13405 struct sk_buff *skb;
13406 u8 *tx_data, *rx_data;
13407 dma_addr_t map;
13408 int num_pkts, tx_len, rx_len, i, err;
13409 struct tg3_rx_buffer_desc *desc;
13410 struct tg3_napi *tnapi, *rnapi;
13411 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13413 tnapi = &tp->napi[0];
13414 rnapi = &tp->napi[0];
13415 if (tp->irq_cnt > 1) {
13416 if (tg3_flag(tp, ENABLE_RSS))
13417 rnapi = &tp->napi[1];
13418 if (tg3_flag(tp, ENABLE_TSS))
13419 tnapi = &tp->napi[1];
13421 coal_now = tnapi->coal_now | rnapi->coal_now;
13423 err = -EIO;
13425 tx_len = pktsz;
13426 skb = netdev_alloc_skb(tp->dev, tx_len);
13427 if (!skb)
13428 return -ENOMEM;
13430 tx_data = skb_put(skb, tx_len);
13431 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13432 memset(tx_data + ETH_ALEN, 0x0, 8);
13434 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13436 if (tso_loopback) {
13437 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13439 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13440 TG3_TSO_TCP_OPT_LEN;
13442 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13443 sizeof(tg3_tso_header));
13444 mss = TG3_TSO_MSS;
13446 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13447 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13449 /* Set the total length field in the IP header */
13450 iph->tot_len = htons((u16)(mss + hdr_len));
13452 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13453 TXD_FLAG_CPU_POST_DMA);
13455 if (tg3_flag(tp, HW_TSO_1) ||
13456 tg3_flag(tp, HW_TSO_2) ||
13457 tg3_flag(tp, HW_TSO_3)) {
13458 struct tcphdr *th;
13459 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13460 th = (struct tcphdr *)&tx_data[val];
13461 th->check = 0;
13462 } else
13463 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13465 if (tg3_flag(tp, HW_TSO_3)) {
13466 mss |= (hdr_len & 0xc) << 12;
13467 if (hdr_len & 0x10)
13468 base_flags |= 0x00000010;
13469 base_flags |= (hdr_len & 0x3e0) << 5;
13470 } else if (tg3_flag(tp, HW_TSO_2))
13471 mss |= hdr_len << 9;
13472 else if (tg3_flag(tp, HW_TSO_1) ||
13473 tg3_asic_rev(tp) == ASIC_REV_5705) {
13474 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13475 } else {
13476 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13479 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13480 } else {
13481 num_pkts = 1;
13482 data_off = ETH_HLEN;
13484 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13485 tx_len > VLAN_ETH_FRAME_LEN)
13486 base_flags |= TXD_FLAG_JMB_PKT;
13489 for (i = data_off; i < tx_len; i++)
13490 tx_data[i] = (u8) (i & 0xff);
13492 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13493 if (pci_dma_mapping_error(tp->pdev, map)) {
13494 dev_kfree_skb(skb);
13495 return -EIO;
13498 val = tnapi->tx_prod;
13499 tnapi->tx_buffers[val].skb = skb;
13500 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13502 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13503 rnapi->coal_now);
13505 udelay(10);
13507 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13509 budget = tg3_tx_avail(tnapi);
13510 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13511 base_flags | TXD_FLAG_END, mss, 0)) {
13512 tnapi->tx_buffers[val].skb = NULL;
13513 dev_kfree_skb(skb);
13514 return -EIO;
13517 tnapi->tx_prod++;
13519 /* Sync BD data before updating mailbox */
13520 wmb();
13522 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13523 tr32_mailbox(tnapi->prodmbox);
13525 udelay(10);
13527 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13528 for (i = 0; i < 35; i++) {
13529 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13530 coal_now);
13532 udelay(10);
13534 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13535 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13536 if ((tx_idx == tnapi->tx_prod) &&
13537 (rx_idx == (rx_start_idx + num_pkts)))
13538 break;
13541 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13542 dev_kfree_skb(skb);
13544 if (tx_idx != tnapi->tx_prod)
13545 goto out;
13547 if (rx_idx != rx_start_idx + num_pkts)
13548 goto out;
13550 val = data_off;
13551 while (rx_idx != rx_start_idx) {
13552 desc = &rnapi->rx_rcb[rx_start_idx++];
13553 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13554 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13556 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13557 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13558 goto out;
13560 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13561 - ETH_FCS_LEN;
13563 if (!tso_loopback) {
13564 if (rx_len != tx_len)
13565 goto out;
13567 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13568 if (opaque_key != RXD_OPAQUE_RING_STD)
13569 goto out;
13570 } else {
13571 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13572 goto out;
13574 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13575 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13576 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13577 goto out;
13580 if (opaque_key == RXD_OPAQUE_RING_STD) {
13581 rx_data = tpr->rx_std_buffers[desc_idx].data;
13582 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13583 mapping);
13584 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13585 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13586 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13587 mapping);
13588 } else
13589 goto out;
13591 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13592 PCI_DMA_FROMDEVICE);
13594 rx_data += TG3_RX_OFFSET(tp);
13595 for (i = data_off; i < rx_len; i++, val++) {
13596 if (*(rx_data + i) != (u8) (val & 0xff))
13597 goto out;
13601 err = 0;
13603 /* tg3_free_rings will unmap and free the rx_data */
13604 out:
13605 return err;
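/* Illustrative sketch, not part of the driver: how the TSO leg of the
 * loopback test above derives num_pkts. The payload is everything past
 * the two MAC addresses and the 54-byte canned tg3_tso_header, split
 * into TG3_TSO_MSS-sized segments, rounding up. The frame size is an
 * example value.
 */
#if 0
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int tx_len = 1514;		/* example frame size */
	unsigned int hdr = 2 * 6 + 54;		/* MAC addrs + tg3_tso_header */
	unsigned int mss = 500;			/* TG3_TSO_MSS */
	unsigned int payload = tx_len - hdr;

	printf("%u payload bytes -> %u segments\n",
	       payload, DIV_ROUND_UP(payload, mss));
	return 0;
}
#endif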
13608 #define TG3_STD_LOOPBACK_FAILED 1
13609 #define TG3_JMB_LOOPBACK_FAILED 2
13610 #define TG3_TSO_LOOPBACK_FAILED 4
13611 #define TG3_LOOPBACK_FAILED \
13612 (TG3_STD_LOOPBACK_FAILED | \
13613 TG3_JMB_LOOPBACK_FAILED | \
13614 TG3_TSO_LOOPBACK_FAILED)
13616 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13618 int err = -EIO;
13619 u32 eee_cap;
13620 u32 jmb_pkt_sz = 9000;
13622 if (tp->dma_limit)
13623 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13625 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13626 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13628 if (!netif_running(tp->dev)) {
13629 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13630 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13631 if (do_extlpbk)
13632 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13633 goto done;
13636 err = tg3_reset_hw(tp, true);
13637 if (err) {
13638 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13639 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13640 if (do_extlpbk)
13641 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13642 goto done;
13645 if (tg3_flag(tp, ENABLE_RSS)) {
13646 int i;
13648 /* Reroute all rx packets to the 1st queue */
13649 for (i = MAC_RSS_INDIR_TBL_0;
13650 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13651 tw32(i, 0x0);
13654 /* HW errata - mac loopback fails in some cases on 5780.
13655 * Normal traffic and PHY loopback are not affected by
13656 * errata. Also, the MAC loopback test is deprecated for
13657 * all newer ASIC revisions.
13658 */
13659 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13660 !tg3_flag(tp, CPMU_PRESENT)) {
13661 tg3_mac_loopback(tp, true);
13663 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13664 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13666 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13667 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13668 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13670 tg3_mac_loopback(tp, false);
13673 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13674 !tg3_flag(tp, USE_PHYLIB)) {
13675 int i;
13677 tg3_phy_lpbk_set(tp, 0, false);
13679 /* Wait for link */
13680 for (i = 0; i < 100; i++) {
13681 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13682 break;
13683 mdelay(1);
13686 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13687 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13688 if (tg3_flag(tp, TSO_CAPABLE) &&
13689 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13690 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13691 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13692 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13693 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13695 if (do_extlpbk) {
13696 tg3_phy_lpbk_set(tp, 0, true);
13698 /* All link indications report up, but the hardware
13699 * isn't really ready for about 20 msec. Double it
13700 * to be sure.
13701 */
13702 mdelay(40);
13704 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13705 data[TG3_EXT_LOOPB_TEST] |=
13706 TG3_STD_LOOPBACK_FAILED;
13707 if (tg3_flag(tp, TSO_CAPABLE) &&
13708 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13709 data[TG3_EXT_LOOPB_TEST] |=
13710 TG3_TSO_LOOPBACK_FAILED;
13711 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13712 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13713 data[TG3_EXT_LOOPB_TEST] |=
13714 TG3_JMB_LOOPBACK_FAILED;
13717 /* Re-enable gphy autopowerdown. */
13718 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13719 tg3_phy_toggle_apd(tp, true);
13722 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13723 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13725 done:
13726 tp->phy_flags |= eee_cap;
13728 return err;
13731 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13732 u64 *data)
13734 struct tg3 *tp = netdev_priv(dev);
13735 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13737 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13738 if (tg3_power_up(tp)) {
13739 etest->flags |= ETH_TEST_FL_FAILED;
13740 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13741 return;
13743 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13746 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13748 if (tg3_test_nvram(tp) != 0) {
13749 etest->flags |= ETH_TEST_FL_FAILED;
13750 data[TG3_NVRAM_TEST] = 1;
13752 if (!doextlpbk && tg3_test_link(tp)) {
13753 etest->flags |= ETH_TEST_FL_FAILED;
13754 data[TG3_LINK_TEST] = 1;
13756 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13757 int err, err2 = 0, irq_sync = 0;
13759 if (netif_running(dev)) {
13760 tg3_phy_stop(tp);
13761 tg3_netif_stop(tp);
13762 irq_sync = 1;
13765 tg3_full_lock(tp, irq_sync);
13766 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13767 err = tg3_nvram_lock(tp);
13768 tg3_halt_cpu(tp, RX_CPU_BASE);
13769 if (!tg3_flag(tp, 5705_PLUS))
13770 tg3_halt_cpu(tp, TX_CPU_BASE);
13771 if (!err)
13772 tg3_nvram_unlock(tp);
13774 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13775 tg3_phy_reset(tp);
13777 if (tg3_test_registers(tp) != 0) {
13778 etest->flags |= ETH_TEST_FL_FAILED;
13779 data[TG3_REGISTER_TEST] = 1;
13782 if (tg3_test_memory(tp) != 0) {
13783 etest->flags |= ETH_TEST_FL_FAILED;
13784 data[TG3_MEMORY_TEST] = 1;
13787 if (doextlpbk)
13788 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13790 if (tg3_test_loopback(tp, data, doextlpbk))
13791 etest->flags |= ETH_TEST_FL_FAILED;
13793 tg3_full_unlock(tp);
13795 if (tg3_test_interrupt(tp) != 0) {
13796 etest->flags |= ETH_TEST_FL_FAILED;
13797 data[TG3_INTERRUPT_TEST] = 1;
13800 tg3_full_lock(tp, 0);
13802 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13803 if (netif_running(dev)) {
13804 tg3_flag_set(tp, INIT_COMPLETE);
13805 err2 = tg3_restart_hw(tp, true);
13806 if (!err2)
13807 tg3_netif_start(tp);
13810 tg3_full_unlock(tp);
13812 if (irq_sync && !err2)
13813 tg3_phy_start(tp);
13815 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13816 tg3_power_down_prepare(tp);
13820 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13822 struct tg3 *tp = netdev_priv(dev);
13823 struct hwtstamp_config stmpconf;
13825 if (!tg3_flag(tp, PTP_CAPABLE))
13826 return -EOPNOTSUPP;
13828 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13829 return -EFAULT;
13831 if (stmpconf.flags)
13832 return -EINVAL;
13834 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13835 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13836 return -ERANGE;
13838 switch (stmpconf.rx_filter) {
13839 case HWTSTAMP_FILTER_NONE:
13840 tp->rxptpctl = 0;
13841 break;
13842 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13843 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13844 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13845 break;
13846 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13847 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13848 TG3_RX_PTP_CTL_SYNC_EVNT;
13849 break;
13850 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13851 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13852 TG3_RX_PTP_CTL_DELAY_REQ;
13853 break;
13854 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13855 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13856 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13857 break;
13858 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13859 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13860 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13861 break;
13862 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13863 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13864 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13865 break;
13866 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13867 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13868 TG3_RX_PTP_CTL_SYNC_EVNT;
13869 break;
13870 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13871 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13872 TG3_RX_PTP_CTL_SYNC_EVNT;
13873 break;
13874 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13875 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13876 TG3_RX_PTP_CTL_SYNC_EVNT;
13877 break;
13878 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13879 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13880 TG3_RX_PTP_CTL_DELAY_REQ;
13881 break;
13882 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13883 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13884 TG3_RX_PTP_CTL_DELAY_REQ;
13885 break;
13886 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13887 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13888 TG3_RX_PTP_CTL_DELAY_REQ;
13889 break;
13890 default:
13891 return -ERANGE;
13894 if (netif_running(dev) && tp->rxptpctl)
13895 tw32(TG3_RX_PTP_CTL,
13896 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13898 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13899 tg3_flag_set(tp, TX_TSTAMP_EN);
13900 else
13901 tg3_flag_clear(tp, TX_TSTAMP_EN);
13903 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13904 -EFAULT : 0;
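/* Illustrative sketch, not part of the driver: the userspace side of
 * the SIOCSHWTSTAMP handler above, turning on tx timestamps and v2 PTP
 * event rx filtering. "eth0" is a placeholder interface name.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_stamps(const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int err;

	if (fd < 0)
		return -1;
	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	err = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return err;
}
#endif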
13907 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13909 struct tg3 *tp = netdev_priv(dev);
13910 struct hwtstamp_config stmpconf;
13912 if (!tg3_flag(tp, PTP_CAPABLE))
13913 return -EOPNOTSUPP;
13915 stmpconf.flags = 0;
13916 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13917 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13919 switch (tp->rxptpctl) {
13920 case 0:
13921 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13922 break;
13923 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13924 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13925 break;
13926 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13927 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13928 break;
13929 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13930 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13931 break;
13932 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13933 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13934 break;
13935 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13936 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13937 break;
13938 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13939 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13940 break;
13941 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13942 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13943 break;
13944 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13945 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13946 break;
13947 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13948 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13949 break;
13950 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13951 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13952 break;
13953 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13954 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13955 break;
13956 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13957 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13958 break;
13959 default:
13960 WARN_ON_ONCE(1);
13961 return -ERANGE;
13964 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13965 -EFAULT : 0;
13968 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13970 struct mii_ioctl_data *data = if_mii(ifr);
13971 struct tg3 *tp = netdev_priv(dev);
13972 int err;
13974 if (tg3_flag(tp, USE_PHYLIB)) {
13975 struct phy_device *phydev;
13976 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13977 return -EAGAIN;
13978 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13979 return phy_mii_ioctl(phydev, ifr, cmd);
13982 switch (cmd) {
13983 case SIOCGMIIPHY:
13984 data->phy_id = tp->phy_addr;
13986 /* fallthru */
13987 case SIOCGMIIREG: {
13988 u32 mii_regval;
13990 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13991 break; /* We have no PHY */
13993 if (!netif_running(dev))
13994 return -EAGAIN;
13996 spin_lock_bh(&tp->lock);
13997 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13998 data->reg_num & 0x1f, &mii_regval);
13999 spin_unlock_bh(&tp->lock);
14001 data->val_out = mii_regval;
14003 return err;
14006 case SIOCSMIIREG:
14007 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14008 break; /* We have no PHY */
14010 if (!netif_running(dev))
14011 return -EAGAIN;
14013 spin_lock_bh(&tp->lock);
14014 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14015 data->reg_num & 0x1f, data->val_in);
14016 spin_unlock_bh(&tp->lock);
14018 return err;
14020 case SIOCSHWTSTAMP:
14021 return tg3_hwtstamp_set(dev, ifr);
14023 case SIOCGHWTSTAMP:
14024 return tg3_hwtstamp_get(dev, ifr);
14026 default:
14027 /* do nothing */
14028 break;
14030 return -EOPNOTSUPP;
14033 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14035 struct tg3 *tp = netdev_priv(dev);
14037 memcpy(ec, &tp->coal, sizeof(*ec));
14038 return 0;
14041 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14043 struct tg3 *tp = netdev_priv(dev);
14044 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14045 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14047 if (!tg3_flag(tp, 5705_PLUS)) {
14048 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14049 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14050 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14051 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14054 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14055 (!ec->rx_coalesce_usecs) ||
14056 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14057 (!ec->tx_coalesce_usecs) ||
14058 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14059 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14060 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14061 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14062 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14063 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14064 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14065 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14066 return -EINVAL;
14068 /* Only copy relevant parameters, ignore all others. */
14069 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14070 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14071 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14072 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14073 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14074 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14075 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14076 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14077 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14079 if (netif_running(dev)) {
14080 tg3_full_lock(tp, 0);
14081 __tg3_set_coalesce(tp, &tp->coal);
14082 tg3_full_unlock(tp);
14084 return 0;
14087 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14089 struct tg3 *tp = netdev_priv(dev);
14091 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14092 netdev_warn(tp->dev, "Board does not support EEE!\n");
14093 return -EOPNOTSUPP;
14096 if (edata->advertised != tp->eee.advertised) {
14097 netdev_warn(tp->dev,
14098 "Direct manipulation of EEE advertisement is not supported\n");
14099 return -EINVAL;
14102 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14103 netdev_warn(tp->dev,
14104 "Maximal Tx Lpi timer supported is %#x(u)\n",
14105 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14106 return -EINVAL;
14109 tp->eee = *edata;
14111 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14112 tg3_warn_mgmt_link_flap(tp);
14114 if (netif_running(tp->dev)) {
14115 tg3_full_lock(tp, 0);
14116 tg3_setup_eee(tp);
14117 tg3_phy_reset(tp);
14118 tg3_full_unlock(tp);
14121 return 0;
14124 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14126 struct tg3 *tp = netdev_priv(dev);
14128 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14129 netdev_warn(tp->dev,
14130 "Board does not support EEE!\n");
14131 return -EOPNOTSUPP;
14134 *edata = tp->eee;
14135 return 0;
14138 static const struct ethtool_ops tg3_ethtool_ops = {
14139 .get_drvinfo = tg3_get_drvinfo,
14140 .get_regs_len = tg3_get_regs_len,
14141 .get_regs = tg3_get_regs,
14142 .get_wol = tg3_get_wol,
14143 .set_wol = tg3_set_wol,
14144 .get_msglevel = tg3_get_msglevel,
14145 .set_msglevel = tg3_set_msglevel,
14146 .nway_reset = tg3_nway_reset,
14147 .get_link = ethtool_op_get_link,
14148 .get_eeprom_len = tg3_get_eeprom_len,
14149 .get_eeprom = tg3_get_eeprom,
14150 .set_eeprom = tg3_set_eeprom,
14151 .get_ringparam = tg3_get_ringparam,
14152 .set_ringparam = tg3_set_ringparam,
14153 .get_pauseparam = tg3_get_pauseparam,
14154 .set_pauseparam = tg3_set_pauseparam,
14155 .self_test = tg3_self_test,
14156 .get_strings = tg3_get_strings,
14157 .set_phys_id = tg3_set_phys_id,
14158 .get_ethtool_stats = tg3_get_ethtool_stats,
14159 .get_coalesce = tg3_get_coalesce,
14160 .set_coalesce = tg3_set_coalesce,
14161 .get_sset_count = tg3_get_sset_count,
14162 .get_rxnfc = tg3_get_rxnfc,
14163 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14164 .get_rxfh = tg3_get_rxfh,
14165 .set_rxfh = tg3_set_rxfh,
14166 .get_channels = tg3_get_channels,
14167 .set_channels = tg3_set_channels,
14168 .get_ts_info = tg3_get_ts_info,
14169 .get_eee = tg3_get_eee,
14170 .set_eee = tg3_set_eee,
14171 .get_link_ksettings = tg3_get_link_ksettings,
14172 .set_link_ksettings = tg3_set_link_ksettings,
14175 static void tg3_get_stats64(struct net_device *dev,
14176 struct rtnl_link_stats64 *stats)
14178 struct tg3 *tp = netdev_priv(dev);
14180 spin_lock_bh(&tp->lock);
14181 if (!tp->hw_stats) {
14182 *stats = tp->net_stats_prev;
14183 spin_unlock_bh(&tp->lock);
14184 return;
14187 tg3_get_nstats(tp, stats);
14188 spin_unlock_bh(&tp->lock);
14191 static void tg3_set_rx_mode(struct net_device *dev)
14193 struct tg3 *tp = netdev_priv(dev);
14195 if (!netif_running(dev))
14196 return;
14198 tg3_full_lock(tp, 0);
14199 __tg3_set_rx_mode(dev);
14200 tg3_full_unlock(tp);
14203 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14204 int new_mtu)
14206 dev->mtu = new_mtu;
14208 if (new_mtu > ETH_DATA_LEN) {
14209 if (tg3_flag(tp, 5780_CLASS)) {
14210 netdev_update_features(dev);
14211 tg3_flag_clear(tp, TSO_CAPABLE);
14212 } else {
14213 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14215 } else {
14216 if (tg3_flag(tp, 5780_CLASS)) {
14217 tg3_flag_set(tp, TSO_CAPABLE);
14218 netdev_update_features(dev);
14220 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14224 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14226 struct tg3 *tp = netdev_priv(dev);
14227 int err;
14228 bool reset_phy = false;
14230 if (!netif_running(dev)) {
14231 /* We'll just catch it later when the
14232 * device is brought up.
14233 */
14234 tg3_set_mtu(dev, tp, new_mtu);
14235 return 0;
14238 tg3_phy_stop(tp);
14240 tg3_netif_stop(tp);
14242 tg3_set_mtu(dev, tp, new_mtu);
14244 tg3_full_lock(tp, 1);
14246 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14248 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14249 * breaks all requests to 256 bytes.
14250 */
14251 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14252 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14253 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14254 tg3_asic_rev(tp) == ASIC_REV_5720)
14255 reset_phy = true;
14257 err = tg3_restart_hw(tp, reset_phy);
14259 if (!err)
14260 tg3_netif_start(tp);
14262 tg3_full_unlock(tp);
14264 if (!err)
14265 tg3_phy_start(tp);
14267 return err;
14270 static const struct net_device_ops tg3_netdev_ops = {
14271 .ndo_open = tg3_open,
14272 .ndo_stop = tg3_close,
14273 .ndo_start_xmit = tg3_start_xmit,
14274 .ndo_get_stats64 = tg3_get_stats64,
14275 .ndo_validate_addr = eth_validate_addr,
14276 .ndo_set_rx_mode = tg3_set_rx_mode,
14277 .ndo_set_mac_address = tg3_set_mac_addr,
14278 .ndo_do_ioctl = tg3_ioctl,
14279 .ndo_tx_timeout = tg3_tx_timeout,
14280 .ndo_change_mtu = tg3_change_mtu,
14281 .ndo_fix_features = tg3_fix_features,
14282 .ndo_set_features = tg3_set_features,
14283 #ifdef CONFIG_NET_POLL_CONTROLLER
14284 .ndo_poll_controller = tg3_poll_controller,
14285 #endif
14288 static void tg3_get_eeprom_size(struct tg3 *tp)
14290 u32 cursize, val, magic;
14292 tp->nvram_size = EEPROM_CHIP_SIZE;
14294 if (tg3_nvram_read(tp, 0, &magic) != 0)
14295 return;
14297 if ((magic != TG3_EEPROM_MAGIC) &&
14298 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14299 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14300 return;
14302 /*
14303 * Size the chip by reading offsets at increasing powers of two.
14304 * When we encounter our validation signature, we know the addressing
14305 * has wrapped around, and thus have our chip size.
14306 */
14307 cursize = 0x10;
14309 while (cursize < tp->nvram_size) {
14310 if (tg3_nvram_read(tp, cursize, &val) != 0)
14311 return;
14313 if (val == magic)
14314 break;
14316 cursize <<= 1;
14319 tp->nvram_size = cursize;
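/* Illustrative sketch, not part of the driver: the power-of-two sizing
 * loop above, run against a fake EEPROM whose addressing wraps, so the
 * first probe offset that reads back the magic word is the chip size.
 * CHIP_SIZE and the magic value are arbitrary stand-ins.
 */
#if 0
#include <stdio.h>

#define CHIP_SIZE 0x100		/* pretend 256-byte part */

static unsigned int eeprom_read(unsigned int off)
{
	off &= CHIP_SIZE - 1;	/* address wrap-around */
	return off == 0 ? 0x12345678 : 0;	/* stand-in magic at offset 0 */
}

int main(void)
{
	unsigned int magic = eeprom_read(0);
	unsigned int cursize = 0x10;

	while (cursize < 0x10000 && eeprom_read(cursize) != magic)
		cursize <<= 1;
	printf("chip size = 0x%x\n", cursize);	/* prints 0x100 */
	return 0;
}
#endif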
14322 static void tg3_get_nvram_size(struct tg3 *tp)
14324 u32 val;
14326 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14327 return;
14329 /* Selfboot format */
14330 if (val != TG3_EEPROM_MAGIC) {
14331 tg3_get_eeprom_size(tp);
14332 return;
14335 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14336 if (val != 0) {
14337 /* This is confusing. We want to operate on the
14338 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14339 * call will read from NVRAM and byteswap the data
14340 * according to the byteswapping settings for all
14341 * other register accesses. This ensures the data we
14342 * want will always reside in the lower 16-bits.
14343 * However, the data in NVRAM is in LE format, which
14344 * means the data from the NVRAM read will always be
14345 * opposite the endianness of the CPU. The 16-bit
14346 * byteswap then brings the data to CPU endianness.
14347 */
14348 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14349 return;
14352 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
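/* Illustrative sketch, not part of the driver: the 16-bit swap the
 * comment above describes. A little-endian halfword read through the
 * byte-order-preserving NVRAM path lands reversed in the low 16 bits;
 * swab16() restores CPU order. The raw value is made up.
 */
#if 0
#include <stdio.h>

static unsigned short swab16(unsigned short x)
{
	return (unsigned short)((x << 8) | (x >> 8));
}

int main(void)
{
	unsigned short raw = 0x0002;	/* NVRAM stored 0x0200 (512) LE */

	printf("nvram size = %u KB\n", swab16(raw));	/* prints 512 */
	return 0;
}
#endif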
14355 static void tg3_get_nvram_info(struct tg3 *tp)
14357 u32 nvcfg1;
14359 nvcfg1 = tr32(NVRAM_CFG1);
14360 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14361 tg3_flag_set(tp, FLASH);
14362 } else {
14363 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14364 tw32(NVRAM_CFG1, nvcfg1);
14367 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14368 tg3_flag(tp, 5780_CLASS)) {
14369 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14370 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14371 tp->nvram_jedecnum = JEDEC_ATMEL;
14372 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14373 tg3_flag_set(tp, NVRAM_BUFFERED);
14374 break;
14375 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14376 tp->nvram_jedecnum = JEDEC_ATMEL;
14377 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14378 break;
14379 case FLASH_VENDOR_ATMEL_EEPROM:
14380 tp->nvram_jedecnum = JEDEC_ATMEL;
14381 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14382 tg3_flag_set(tp, NVRAM_BUFFERED);
14383 break;
14384 case FLASH_VENDOR_ST:
14385 tp->nvram_jedecnum = JEDEC_ST;
14386 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14387 tg3_flag_set(tp, NVRAM_BUFFERED);
14388 break;
14389 case FLASH_VENDOR_SAIFUN:
14390 tp->nvram_jedecnum = JEDEC_SAIFUN;
14391 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14392 break;
14393 case FLASH_VENDOR_SST_SMALL:
14394 case FLASH_VENDOR_SST_LARGE:
14395 tp->nvram_jedecnum = JEDEC_SST;
14396 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14397 break;
14399 } else {
14400 tp->nvram_jedecnum = JEDEC_ATMEL;
14401 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14402 tg3_flag_set(tp, NVRAM_BUFFERED);
14406 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14408 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14409 case FLASH_5752PAGE_SIZE_256:
14410 tp->nvram_pagesize = 256;
14411 break;
14412 case FLASH_5752PAGE_SIZE_512:
14413 tp->nvram_pagesize = 512;
14414 break;
14415 case FLASH_5752PAGE_SIZE_1K:
14416 tp->nvram_pagesize = 1024;
14417 break;
14418 case FLASH_5752PAGE_SIZE_2K:
14419 tp->nvram_pagesize = 2048;
14420 break;
14421 case FLASH_5752PAGE_SIZE_4K:
14422 tp->nvram_pagesize = 4096;
14423 break;
14424 case FLASH_5752PAGE_SIZE_264:
14425 tp->nvram_pagesize = 264;
14426 break;
14427 case FLASH_5752PAGE_SIZE_528:
14428 tp->nvram_pagesize = 528;
14429 break;
14433 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14435 u32 nvcfg1;
14437 nvcfg1 = tr32(NVRAM_CFG1);
14439 /* NVRAM protection for TPM */
14440 if (nvcfg1 & (1 << 27))
14441 tg3_flag_set(tp, PROTECTED_NVRAM);
14443 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14444 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14445 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14446 tp->nvram_jedecnum = JEDEC_ATMEL;
14447 tg3_flag_set(tp, NVRAM_BUFFERED);
14448 break;
14449 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14450 tp->nvram_jedecnum = JEDEC_ATMEL;
14451 tg3_flag_set(tp, NVRAM_BUFFERED);
14452 tg3_flag_set(tp, FLASH);
14453 break;
14454 case FLASH_5752VENDOR_ST_M45PE10:
14455 case FLASH_5752VENDOR_ST_M45PE20:
14456 case FLASH_5752VENDOR_ST_M45PE40:
14457 tp->nvram_jedecnum = JEDEC_ST;
14458 tg3_flag_set(tp, NVRAM_BUFFERED);
14459 tg3_flag_set(tp, FLASH);
14460 break;
14463 if (tg3_flag(tp, FLASH)) {
14464 tg3_nvram_get_pagesize(tp, nvcfg1);
14465 } else {
14466 /* For eeprom, set pagesize to maximum eeprom size */
14467 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14469 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14470 tw32(NVRAM_CFG1, nvcfg1);
14474 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14476 u32 nvcfg1, protect = 0;
14478 nvcfg1 = tr32(NVRAM_CFG1);
14480 /* NVRAM protection for TPM */
14481 if (nvcfg1 & (1 << 27)) {
14482 tg3_flag_set(tp, PROTECTED_NVRAM);
14483 protect = 1;
14486 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14487 switch (nvcfg1) {
14488 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14489 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14490 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14491 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14492 tp->nvram_jedecnum = JEDEC_ATMEL;
14493 tg3_flag_set(tp, NVRAM_BUFFERED);
14494 tg3_flag_set(tp, FLASH);
14495 tp->nvram_pagesize = 264;
14496 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14497 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14498 tp->nvram_size = (protect ? 0x3e200 :
14499 TG3_NVRAM_SIZE_512KB);
14500 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14501 tp->nvram_size = (protect ? 0x1f200 :
14502 TG3_NVRAM_SIZE_256KB);
14503 else
14504 tp->nvram_size = (protect ? 0x1f200 :
14505 TG3_NVRAM_SIZE_128KB);
14506 break;
14507 case FLASH_5752VENDOR_ST_M45PE10:
14508 case FLASH_5752VENDOR_ST_M45PE20:
14509 case FLASH_5752VENDOR_ST_M45PE40:
14510 tp->nvram_jedecnum = JEDEC_ST;
14511 tg3_flag_set(tp, NVRAM_BUFFERED);
14512 tg3_flag_set(tp, FLASH);
14513 tp->nvram_pagesize = 256;
14514 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14515 tp->nvram_size = (protect ?
14516 TG3_NVRAM_SIZE_64KB :
14517 TG3_NVRAM_SIZE_128KB);
14518 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14519 tp->nvram_size = (protect ?
14520 TG3_NVRAM_SIZE_64KB :
14521 TG3_NVRAM_SIZE_256KB);
14522 else
14523 tp->nvram_size = (protect ?
14524 TG3_NVRAM_SIZE_128KB :
14525 TG3_NVRAM_SIZE_512KB);
14526 break;
14530 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14532 u32 nvcfg1;
14534 nvcfg1 = tr32(NVRAM_CFG1);
14536 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14537 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14538 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14539 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14540 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14541 tp->nvram_jedecnum = JEDEC_ATMEL;
14542 tg3_flag_set(tp, NVRAM_BUFFERED);
14543 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14545 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14546 tw32(NVRAM_CFG1, nvcfg1);
14547 break;
14548 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14549 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14550 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14551 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14552 tp->nvram_jedecnum = JEDEC_ATMEL;
14553 tg3_flag_set(tp, NVRAM_BUFFERED);
14554 tg3_flag_set(tp, FLASH);
14555 tp->nvram_pagesize = 264;
14556 break;
14557 case FLASH_5752VENDOR_ST_M45PE10:
14558 case FLASH_5752VENDOR_ST_M45PE20:
14559 case FLASH_5752VENDOR_ST_M45PE40:
14560 tp->nvram_jedecnum = JEDEC_ST;
14561 tg3_flag_set(tp, NVRAM_BUFFERED);
14562 tg3_flag_set(tp, FLASH);
14563 tp->nvram_pagesize = 256;
14564 break;
14568 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14570 u32 nvcfg1, protect = 0;
14572 nvcfg1 = tr32(NVRAM_CFG1);
14574 /* NVRAM protection for TPM */
14575 if (nvcfg1 & (1 << 27)) {
14576 tg3_flag_set(tp, PROTECTED_NVRAM);
14577 protect = 1;
14580 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14581 switch (nvcfg1) {
14582 case FLASH_5761VENDOR_ATMEL_ADB021D:
14583 case FLASH_5761VENDOR_ATMEL_ADB041D:
14584 case FLASH_5761VENDOR_ATMEL_ADB081D:
14585 case FLASH_5761VENDOR_ATMEL_ADB161D:
14586 case FLASH_5761VENDOR_ATMEL_MDB021D:
14587 case FLASH_5761VENDOR_ATMEL_MDB041D:
14588 case FLASH_5761VENDOR_ATMEL_MDB081D:
14589 case FLASH_5761VENDOR_ATMEL_MDB161D:
14590 tp->nvram_jedecnum = JEDEC_ATMEL;
14591 tg3_flag_set(tp, NVRAM_BUFFERED);
14592 tg3_flag_set(tp, FLASH);
14593 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14594 tp->nvram_pagesize = 256;
14595 break;
14596 case FLASH_5761VENDOR_ST_A_M45PE20:
14597 case FLASH_5761VENDOR_ST_A_M45PE40:
14598 case FLASH_5761VENDOR_ST_A_M45PE80:
14599 case FLASH_5761VENDOR_ST_A_M45PE16:
14600 case FLASH_5761VENDOR_ST_M_M45PE20:
14601 case FLASH_5761VENDOR_ST_M_M45PE40:
14602 case FLASH_5761VENDOR_ST_M_M45PE80:
14603 case FLASH_5761VENDOR_ST_M_M45PE16:
14604 tp->nvram_jedecnum = JEDEC_ST;
14605 tg3_flag_set(tp, NVRAM_BUFFERED);
14606 tg3_flag_set(tp, FLASH);
14607 tp->nvram_pagesize = 256;
14608 break;
14611 if (protect) {
14612 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14613 } else {
14614 switch (nvcfg1) {
14615 case FLASH_5761VENDOR_ATMEL_ADB161D:
14616 case FLASH_5761VENDOR_ATMEL_MDB161D:
14617 case FLASH_5761VENDOR_ST_A_M45PE16:
14618 case FLASH_5761VENDOR_ST_M_M45PE16:
14619 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14620 break;
14621 case FLASH_5761VENDOR_ATMEL_ADB081D:
14622 case FLASH_5761VENDOR_ATMEL_MDB081D:
14623 case FLASH_5761VENDOR_ST_A_M45PE80:
14624 case FLASH_5761VENDOR_ST_M_M45PE80:
14625 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14626 break;
14627 case FLASH_5761VENDOR_ATMEL_ADB041D:
14628 case FLASH_5761VENDOR_ATMEL_MDB041D:
14629 case FLASH_5761VENDOR_ST_A_M45PE40:
14630 case FLASH_5761VENDOR_ST_M_M45PE40:
14631 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14632 break;
14633 case FLASH_5761VENDOR_ATMEL_ADB021D:
14634 case FLASH_5761VENDOR_ATMEL_MDB021D:
14635 case FLASH_5761VENDOR_ST_A_M45PE20:
14636 case FLASH_5761VENDOR_ST_M_M45PE20:
14637 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14638 break;
14643 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14644 {
14645 tp->nvram_jedecnum = JEDEC_ATMEL;
14646 tg3_flag_set(tp, NVRAM_BUFFERED);
14647 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14648 }
14650 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14652 u32 nvcfg1;
14654 nvcfg1 = tr32(NVRAM_CFG1);
14656 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14657 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14658 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14659 tp->nvram_jedecnum = JEDEC_ATMEL;
14660 tg3_flag_set(tp, NVRAM_BUFFERED);
14661 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14663 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14664 tw32(NVRAM_CFG1, nvcfg1);
14665 return;
14666 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14667 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14668 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14669 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14670 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14671 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14672 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14673 tp->nvram_jedecnum = JEDEC_ATMEL;
14674 tg3_flag_set(tp, NVRAM_BUFFERED);
14675 tg3_flag_set(tp, FLASH);
14677 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14678 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14679 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14680 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14681 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14682 break;
14683 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14684 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14685 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14686 break;
14687 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14688 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14689 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14690 break;
14692 break;
14693 case FLASH_5752VENDOR_ST_M45PE10:
14694 case FLASH_5752VENDOR_ST_M45PE20:
14695 case FLASH_5752VENDOR_ST_M45PE40:
14696 tp->nvram_jedecnum = JEDEC_ST;
14697 tg3_flag_set(tp, NVRAM_BUFFERED);
14698 tg3_flag_set(tp, FLASH);
14700 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14701 case FLASH_5752VENDOR_ST_M45PE10:
14702 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14703 break;
14704 case FLASH_5752VENDOR_ST_M45PE20:
14705 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14706 break;
14707 case FLASH_5752VENDOR_ST_M45PE40:
14708 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14709 break;
14711 break;
14712 default:
14713 tg3_flag_set(tp, NO_NVRAM);
14714 return;
14717 tg3_nvram_get_pagesize(tp, nvcfg1);
14718 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14719 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
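/* Only the 264- and 528-byte (Atmel DataFlash-style) page sizes use
 * the non-power-of-two address translation; all other parts are
 * addressed linearly, hence NO_NVRAM_ADDR_TRANS.
 */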
14723 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14725 u32 nvcfg1;
14727 nvcfg1 = tr32(NVRAM_CFG1);
14729 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14730 case FLASH_5717VENDOR_ATMEL_EEPROM:
14731 case FLASH_5717VENDOR_MICRO_EEPROM:
14732 tp->nvram_jedecnum = JEDEC_ATMEL;
14733 tg3_flag_set(tp, NVRAM_BUFFERED);
14734 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14736 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14737 tw32(NVRAM_CFG1, nvcfg1);
14738 return;
14739 case FLASH_5717VENDOR_ATMEL_MDB011D:
14740 case FLASH_5717VENDOR_ATMEL_ADB011B:
14741 case FLASH_5717VENDOR_ATMEL_ADB011D:
14742 case FLASH_5717VENDOR_ATMEL_MDB021D:
14743 case FLASH_5717VENDOR_ATMEL_ADB021B:
14744 case FLASH_5717VENDOR_ATMEL_ADB021D:
14745 case FLASH_5717VENDOR_ATMEL_45USPT:
14746 tp->nvram_jedecnum = JEDEC_ATMEL;
14747 tg3_flag_set(tp, NVRAM_BUFFERED);
14748 tg3_flag_set(tp, FLASH);
14750 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14751 case FLASH_5717VENDOR_ATMEL_MDB021D:
14752 /* Detect size with tg3_nvram_get_size() */
14753 break;
14754 case FLASH_5717VENDOR_ATMEL_ADB021B:
14755 case FLASH_5717VENDOR_ATMEL_ADB021D:
14756 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14757 break;
14758 default:
14759 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14760 break;
14762 break;
14763 case FLASH_5717VENDOR_ST_M_M25PE10:
14764 case FLASH_5717VENDOR_ST_A_M25PE10:
14765 case FLASH_5717VENDOR_ST_M_M45PE10:
14766 case FLASH_5717VENDOR_ST_A_M45PE10:
14767 case FLASH_5717VENDOR_ST_M_M25PE20:
14768 case FLASH_5717VENDOR_ST_A_M25PE20:
14769 case FLASH_5717VENDOR_ST_M_M45PE20:
14770 case FLASH_5717VENDOR_ST_A_M45PE20:
14771 case FLASH_5717VENDOR_ST_25USPT:
14772 case FLASH_5717VENDOR_ST_45USPT:
14773 tp->nvram_jedecnum = JEDEC_ST;
14774 tg3_flag_set(tp, NVRAM_BUFFERED);
14775 tg3_flag_set(tp, FLASH);
14777 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14778 case FLASH_5717VENDOR_ST_M_M25PE20:
14779 case FLASH_5717VENDOR_ST_M_M45PE20:
14780 /* Detect size with tg3_nvram_get_size() */
14781 break;
14782 case FLASH_5717VENDOR_ST_A_M25PE20:
14783 case FLASH_5717VENDOR_ST_A_M45PE20:
14784 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14785 break;
14786 default:
14787 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14788 break;
14790 break;
14791 default:
14792 tg3_flag_set(tp, NO_NVRAM);
14793 return;
14796 tg3_nvram_get_pagesize(tp, nvcfg1);
14797 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14798 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14801 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14803 u32 nvcfg1, nvmpinstrp, nv_status;
14805 nvcfg1 = tr32(NVRAM_CFG1);
14806 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14808 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14809 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14810 tg3_flag_set(tp, NO_NVRAM);
14811 return;
14814 switch (nvmpinstrp) {
14815 case FLASH_5762_MX25L_100:
14816 case FLASH_5762_MX25L_200:
14817 case FLASH_5762_MX25L_400:
14818 case FLASH_5762_MX25L_800:
14819 case FLASH_5762_MX25L_160_320:
14820 tp->nvram_pagesize = 4096;
14821 tp->nvram_jedecnum = JEDEC_MACRONIX;
14822 tg3_flag_set(tp, NVRAM_BUFFERED);
14823 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14824 tg3_flag_set(tp, FLASH);
14825 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14826 tp->nvram_size =
14827 (1 << (nv_status >> AUTOSENSE_DEVID &
14828 AUTOSENSE_DEVID_MASK)
14829 << AUTOSENSE_SIZE_IN_MB);
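/* Worked example (assuming AUTOSENSE_SIZE_IN_MB is the MB-to-bytes
 * shift, i.e. 20): an autosense device ID field of 2 decodes to
 * (1 << 2) << 20 = 4 MB of NVRAM.
 */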
14830 return;
14832 case FLASH_5762_EEPROM_HD:
14833 nvmpinstrp = FLASH_5720_EEPROM_HD;
14834 break;
14835 case FLASH_5762_EEPROM_LD:
14836 nvmpinstrp = FLASH_5720_EEPROM_LD;
14837 break;
14838 case FLASH_5720VENDOR_M_ST_M45PE20:
14839 /* This pinstrap supports multiple sizes, so force it
14840 * to read the actual size from location 0xf0.
14841 */
14842 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14843 break;
14847 switch (nvmpinstrp) {
14848 case FLASH_5720_EEPROM_HD:
14849 case FLASH_5720_EEPROM_LD:
14850 tp->nvram_jedecnum = JEDEC_ATMEL;
14851 tg3_flag_set(tp, NVRAM_BUFFERED);
14853 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14854 tw32(NVRAM_CFG1, nvcfg1);
14855 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14856 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14857 else
14858 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14859 return;
14860 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14861 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14862 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14863 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14864 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14865 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14866 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14867 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14868 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14869 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14870 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14871 case FLASH_5720VENDOR_ATMEL_45USPT:
14872 tp->nvram_jedecnum = JEDEC_ATMEL;
14873 tg3_flag_set(tp, NVRAM_BUFFERED);
14874 tg3_flag_set(tp, FLASH);
14876 switch (nvmpinstrp) {
14877 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14878 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14879 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14880 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14881 break;
14882 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14883 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14884 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14885 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14886 break;
14887 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14888 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14889 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14890 break;
14891 default:
14892 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14893 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14894 break;
14896 break;
14897 case FLASH_5720VENDOR_M_ST_M25PE10:
14898 case FLASH_5720VENDOR_M_ST_M45PE10:
14899 case FLASH_5720VENDOR_A_ST_M25PE10:
14900 case FLASH_5720VENDOR_A_ST_M45PE10:
14901 case FLASH_5720VENDOR_M_ST_M25PE20:
14902 case FLASH_5720VENDOR_M_ST_M45PE20:
14903 case FLASH_5720VENDOR_A_ST_M25PE20:
14904 case FLASH_5720VENDOR_A_ST_M45PE20:
14905 case FLASH_5720VENDOR_M_ST_M25PE40:
14906 case FLASH_5720VENDOR_M_ST_M45PE40:
14907 case FLASH_5720VENDOR_A_ST_M25PE40:
14908 case FLASH_5720VENDOR_A_ST_M45PE40:
14909 case FLASH_5720VENDOR_M_ST_M25PE80:
14910 case FLASH_5720VENDOR_M_ST_M45PE80:
14911 case FLASH_5720VENDOR_A_ST_M25PE80:
14912 case FLASH_5720VENDOR_A_ST_M45PE80:
14913 case FLASH_5720VENDOR_ST_25USPT:
14914 case FLASH_5720VENDOR_ST_45USPT:
14915 tp->nvram_jedecnum = JEDEC_ST;
14916 tg3_flag_set(tp, NVRAM_BUFFERED);
14917 tg3_flag_set(tp, FLASH);
14919 switch (nvmpinstrp) {
14920 case FLASH_5720VENDOR_M_ST_M25PE20:
14921 case FLASH_5720VENDOR_M_ST_M45PE20:
14922 case FLASH_5720VENDOR_A_ST_M25PE20:
14923 case FLASH_5720VENDOR_A_ST_M45PE20:
14924 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14925 break;
14926 case FLASH_5720VENDOR_M_ST_M25PE40:
14927 case FLASH_5720VENDOR_M_ST_M45PE40:
14928 case FLASH_5720VENDOR_A_ST_M25PE40:
14929 case FLASH_5720VENDOR_A_ST_M45PE40:
14930 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14931 break;
14932 case FLASH_5720VENDOR_M_ST_M25PE80:
14933 case FLASH_5720VENDOR_M_ST_M45PE80:
14934 case FLASH_5720VENDOR_A_ST_M25PE80:
14935 case FLASH_5720VENDOR_A_ST_M45PE80:
14936 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14937 break;
14938 default:
14939 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14940 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14941 break;
14943 break;
14944 default:
14945 tg3_flag_set(tp, NO_NVRAM);
14946 return;
14949 tg3_nvram_get_pagesize(tp, nvcfg1);
14950 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14951 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14953 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14954 u32 val;
14956 if (tg3_nvram_read(tp, 0, &val))
14957 return;
14959 if (val != TG3_EEPROM_MAGIC &&
14960 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14961 tg3_flag_set(tp, NO_NVRAM);
14965 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14966 static void tg3_nvram_init(struct tg3 *tp)
14968 if (tg3_flag(tp, IS_SSB_CORE)) {
14969 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14970 tg3_flag_clear(tp, NVRAM);
14971 tg3_flag_clear(tp, NVRAM_BUFFERED);
14972 tg3_flag_set(tp, NO_NVRAM);
14973 return;
14976 tw32_f(GRC_EEPROM_ADDR,
14977 (EEPROM_ADDR_FSM_RESET |
14978 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14979 EEPROM_ADDR_CLKPERD_SHIFT)));
14981 msleep(1);
14983 /* Enable seeprom accesses. */
14984 tw32_f(GRC_LOCAL_CTRL,
14985 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14986 udelay(100);
14988 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14989 tg3_asic_rev(tp) != ASIC_REV_5701) {
14990 tg3_flag_set(tp, NVRAM);
14992 if (tg3_nvram_lock(tp)) {
14993 netdev_warn(tp->dev,
14994 "Cannot get nvram lock, %s failed\n",
14995 __func__);
14996 return;
14998 tg3_enable_nvram_access(tp);
15000 tp->nvram_size = 0;
15002 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15003 tg3_get_5752_nvram_info(tp);
15004 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15005 tg3_get_5755_nvram_info(tp);
15006 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15007 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15008 tg3_asic_rev(tp) == ASIC_REV_5785)
15009 tg3_get_5787_nvram_info(tp);
15010 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15011 tg3_get_5761_nvram_info(tp);
15012 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15013 tg3_get_5906_nvram_info(tp);
15014 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15015 tg3_flag(tp, 57765_CLASS))
15016 tg3_get_57780_nvram_info(tp);
15017 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15018 tg3_asic_rev(tp) == ASIC_REV_5719)
15019 tg3_get_5717_nvram_info(tp);
15020 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15021 tg3_asic_rev(tp) == ASIC_REV_5762)
15022 tg3_get_5720_nvram_info(tp);
15023 else
15024 tg3_get_nvram_info(tp);
15026 if (tp->nvram_size == 0)
15027 tg3_get_nvram_size(tp);
15029 tg3_disable_nvram_access(tp);
15030 tg3_nvram_unlock(tp);
15032 } else {
15033 tg3_flag_clear(tp, NVRAM);
15034 tg3_flag_clear(tp, NVRAM_BUFFERED);
15036 tg3_get_eeprom_size(tp);
15040 struct subsys_tbl_ent {
15041 u16 subsys_vendor, subsys_devid;
15042 u32 phy_id;
15045 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15046 /* Broadcom boards. */
15047 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15048 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15049 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15050 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15051 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15052 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15053 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15054 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15055 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15056 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15057 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15058 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15059 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15060 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15061 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15062 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15063 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15064 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15065 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15066 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15067 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15068 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15070 /* 3com boards. */
15071 { TG3PCI_SUBVENDOR_ID_3COM,
15072 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15073 { TG3PCI_SUBVENDOR_ID_3COM,
15074 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15075 { TG3PCI_SUBVENDOR_ID_3COM,
15076 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15077 { TG3PCI_SUBVENDOR_ID_3COM,
15078 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15079 { TG3PCI_SUBVENDOR_ID_3COM,
15080 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15082 /* DELL boards. */
15083 { TG3PCI_SUBVENDOR_ID_DELL,
15084 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15085 { TG3PCI_SUBVENDOR_ID_DELL,
15086 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15087 { TG3PCI_SUBVENDOR_ID_DELL,
15088 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15089 { TG3PCI_SUBVENDOR_ID_DELL,
15090 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15092 /* Compaq boards. */
15093 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15094 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15095 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15096 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15097 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15098 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15099 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15100 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15101 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15102 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15104 /* IBM boards. */
15105 { TG3PCI_SUBVENDOR_ID_IBM,
15106 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15109 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15110 {
15111 int i;
15113 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15114 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15115 tp->pdev->subsystem_vendor) &&
15116 (subsys_id_to_phy_id[i].subsys_devid ==
15117 tp->pdev->subsystem_device))
15118 return &subsys_id_to_phy_id[i];
15119 }
15120 return NULL;
15121 }
15123 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15125 u32 val;
15127 tp->phy_id = TG3_PHY_ID_INVALID;
15128 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15130 /* Assume an onboard device and WOL capable by default. */
15131 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15132 tg3_flag_set(tp, WOL_CAP);
15134 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15135 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15136 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15137 tg3_flag_set(tp, IS_NIC);
15139 val = tr32(VCPU_CFGSHDW);
15140 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15141 tg3_flag_set(tp, ASPM_WORKAROUND);
15142 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15143 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15144 tg3_flag_set(tp, WOL_ENABLE);
15145 device_set_wakeup_enable(&tp->pdev->dev, true);
15147 goto done;
15150 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15151 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15152 u32 nic_cfg, led_cfg;
15153 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15154 u32 nic_phy_id, ver, eeprom_phy_id;
15155 int eeprom_phy_serdes = 0;
15157 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15158 tp->nic_sram_data_cfg = nic_cfg;
15160 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15161 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15162 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15163 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15164 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15165 (ver > 0) && (ver < 0x100))
15166 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15168 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15169 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15171 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15172 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15173 tg3_asic_rev(tp) == ASIC_REV_5720)
15174 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15176 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15177 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15178 eeprom_phy_serdes = 1;
15180 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15181 if (nic_phy_id != 0) {
15182 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15183 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15185 eeprom_phy_id = (id1 >> 16) << 10;
15186 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15187 eeprom_phy_id |= (id2 & 0x03ff) << 0;
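/* This packs the two NIC_SRAM PHY ID words into tg3's internal
 * phy_id format: bits 31-26 from id2[15:10], bits 25-10 from
 * id1[31:16], and bits 9-0 from id2[9:0] (model and revision).
 * tg3_phy_probe() builds the same layout from the MII ID registers.
 */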
15188 } else
15189 eeprom_phy_id = 0;
15191 tp->phy_id = eeprom_phy_id;
15192 if (eeprom_phy_serdes) {
15193 if (!tg3_flag(tp, 5705_PLUS))
15194 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15195 else
15196 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15199 if (tg3_flag(tp, 5750_PLUS))
15200 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15201 SHASTA_EXT_LED_MODE_MASK);
15202 else
15203 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15205 switch (led_cfg) {
15206 default:
15207 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15208 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15209 break;
15211 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15212 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15213 break;
15215 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15216 tp->led_ctrl = LED_CTRL_MODE_MAC;
15218 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15219 * read on some older 5700/5701 bootcode.
15220 */
15221 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15222 tg3_asic_rev(tp) == ASIC_REV_5701)
15223 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15225 break;
15227 case SHASTA_EXT_LED_SHARED:
15228 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15229 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15230 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15231 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15232 LED_CTRL_MODE_PHY_2);
15234 if (tg3_flag(tp, 5717_PLUS) ||
15235 tg3_asic_rev(tp) == ASIC_REV_5762)
15236 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15237 LED_CTRL_BLINK_RATE_MASK;
15239 break;
15241 case SHASTA_EXT_LED_MAC:
15242 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15243 break;
15245 case SHASTA_EXT_LED_COMBO:
15246 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15247 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15248 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15249 LED_CTRL_MODE_PHY_2);
15250 break;
15254 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15255 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15256 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15257 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15259 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15260 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15262 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15263 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15264 if ((tp->pdev->subsystem_vendor ==
15265 PCI_VENDOR_ID_ARIMA) &&
15266 (tp->pdev->subsystem_device == 0x205a ||
15267 tp->pdev->subsystem_device == 0x2063))
15268 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15269 } else {
15270 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15271 tg3_flag_set(tp, IS_NIC);
15274 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15275 tg3_flag_set(tp, ENABLE_ASF);
15276 if (tg3_flag(tp, 5750_PLUS))
15277 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15280 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15281 tg3_flag(tp, 5750_PLUS))
15282 tg3_flag_set(tp, ENABLE_APE);
15284 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15285 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15286 tg3_flag_clear(tp, WOL_CAP);
15288 if (tg3_flag(tp, WOL_CAP) &&
15289 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15290 tg3_flag_set(tp, WOL_ENABLE);
15291 device_set_wakeup_enable(&tp->pdev->dev, true);
15294 if (cfg2 & (1 << 17))
15295 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15297 /* SerDes signal pre-emphasis in register 0x590 is set by the
15298 * bootcode if bit 18 is set. */
15299 if (cfg2 & (1 << 18))
15300 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15302 if ((tg3_flag(tp, 57765_PLUS) ||
15303 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15304 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15305 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15306 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15308 if (tg3_flag(tp, PCI_EXPRESS)) {
15309 u32 cfg3;
15311 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15312 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15313 !tg3_flag(tp, 57765_PLUS) &&
15314 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15315 tg3_flag_set(tp, ASPM_WORKAROUND);
15316 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15317 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15318 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15319 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15322 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15323 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15324 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15325 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15326 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15327 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15329 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15330 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15332 done:
15333 if (tg3_flag(tp, WOL_CAP))
15334 device_set_wakeup_enable(&tp->pdev->dev,
15335 tg3_flag(tp, WOL_ENABLE));
15336 else
15337 device_set_wakeup_capable(&tp->pdev->dev, false);
15340 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15342 int i, err;
15343 u32 val2, off = offset * 8;
15345 err = tg3_nvram_lock(tp);
15346 if (err)
15347 return err;
15349 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15350 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15351 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15352 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15353 udelay(10);
15355 for (i = 0; i < 100; i++) {
15356 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15357 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15358 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15359 break;
15361 udelay(10);
15364 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15366 tg3_nvram_unlock(tp);
15367 if (val2 & APE_OTP_STATUS_CMD_DONE)
15368 return 0;
15370 return -EBUSY;
15373 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15375 int i;
15376 u32 val;
15378 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15379 tw32(OTP_CTRL, cmd);
15381 /* Wait for up to 1 ms for command to execute. */
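/* (The loop below polls 100 times at 10 us intervals = 1 ms.) */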
15382 for (i = 0; i < 100; i++) {
15383 val = tr32(OTP_STATUS);
15384 if (val & OTP_STATUS_CMD_DONE)
15385 break;
15386 udelay(10);
15389 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15392 /* Read the gphy configuration from the OTP region of the chip. The gphy
15393 * configuration is a 32-bit value that straddles the alignment boundary.
15394 * We do two 32-bit reads and then shift and merge the results.
15395 */
15396 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15397 {
15398 u32 bhalf_otp, thalf_otp;
15400 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15402 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15403 return 0;
15405 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15407 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15408 return 0;
15410 thalf_otp = tr32(OTP_READ_DATA);
15412 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15414 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15415 return 0;
15417 bhalf_otp = tr32(OTP_READ_DATA);
15419 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
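/* The low half of the first OTP word supplies the upper 16 result
 * bits; the high half of the second supplies the lower 16.
 */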
15422 static void tg3_phy_init_link_config(struct tg3 *tp)
15424 u32 adv = ADVERTISED_Autoneg;
15426 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15427 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15428 adv |= ADVERTISED_1000baseT_Half;
15429 adv |= ADVERTISED_1000baseT_Full;
15432 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15433 adv |= ADVERTISED_100baseT_Half |
15434 ADVERTISED_100baseT_Full |
15435 ADVERTISED_10baseT_Half |
15436 ADVERTISED_10baseT_Full |
15437 ADVERTISED_TP;
15438 else
15439 adv |= ADVERTISED_FIBRE;
15441 tp->link_config.advertising = adv;
15442 tp->link_config.speed = SPEED_UNKNOWN;
15443 tp->link_config.duplex = DUPLEX_UNKNOWN;
15444 tp->link_config.autoneg = AUTONEG_ENABLE;
15445 tp->link_config.active_speed = SPEED_UNKNOWN;
15446 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15448 tp->old_link = -1;
15451 static int tg3_phy_probe(struct tg3 *tp)
15453 u32 hw_phy_id_1, hw_phy_id_2;
15454 u32 hw_phy_id, hw_phy_id_masked;
15455 int err;
15457 /* flow control autonegotiation is default behavior */
15458 tg3_flag_set(tp, PAUSE_AUTONEG);
15459 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15461 if (tg3_flag(tp, ENABLE_APE)) {
15462 switch (tp->pci_fn) {
15463 case 0:
15464 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15465 break;
15466 case 1:
15467 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15468 break;
15469 case 2:
15470 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15471 break;
15472 case 3:
15473 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15474 break;
15478 if (!tg3_flag(tp, ENABLE_ASF) &&
15479 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15480 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15481 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15482 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15484 if (tg3_flag(tp, USE_PHYLIB))
15485 return tg3_phy_init(tp);
15487 /* Reading the PHY ID register can conflict with ASF
15488 * firmware access to the PHY hardware.
15489 */
15490 err = 0;
15491 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15492 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15493 } else {
15494 /* Now read the physical PHY_ID from the chip and verify
15495 * that it is sane. If it doesn't look good, we fall back
15496 * to the PHY_ID already read from the EEPROM and, failing
15497 * that, to the hard-coded subsystem-ID table.
15498 */
15499 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15500 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15502 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15503 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15504 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
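/* Same internal phy_id layout as in tg3_get_eeprom_hw_cfg():
 * MII_PHYSID1 supplies bits 25-10, the top six bits of
 * MII_PHYSID2 supply bits 31-26, and its low ten bits
 * (model and revision) supply bits 9-0.
 */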
15506 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15509 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15510 tp->phy_id = hw_phy_id;
15511 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15512 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15513 else
15514 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15515 } else {
15516 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15517 /* Do nothing, phy ID already set up in
15518 * tg3_get_eeprom_hw_cfg().
15519 */
15520 } else {
15521 struct subsys_tbl_ent *p;
15523 /* No eeprom signature? Try the hardcoded
15524 * subsys device table.
15525 */
15526 p = tg3_lookup_by_subsys(tp);
15527 if (p) {
15528 tp->phy_id = p->phy_id;
15529 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15530 /* So far we have seen the IDs 0xbc050cd0,
15531 * 0xbc050f80 and 0xbc050c30 on devices
15532 * connected to a BCM4785, and there are
15533 * probably more. For now, just assume that
15534 * the phy is supported when it is connected
15535 * to an SSB core.
15536 */
15537 return -ENODEV;
15540 if (!tp->phy_id ||
15541 tp->phy_id == TG3_PHY_ID_BCM8002)
15542 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15546 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15547 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15548 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15549 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15550 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15551 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15552 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15553 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15554 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15555 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15557 tp->eee.supported = SUPPORTED_100baseT_Full |
15558 SUPPORTED_1000baseT_Full;
15559 tp->eee.advertised = ADVERTISED_100baseT_Full |
15560 ADVERTISED_1000baseT_Full;
15561 tp->eee.eee_enabled = 1;
15562 tp->eee.tx_lpi_enabled = 1;
15563 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15566 tg3_phy_init_link_config(tp);
15568 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15569 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15570 !tg3_flag(tp, ENABLE_APE) &&
15571 !tg3_flag(tp, ENABLE_ASF)) {
15572 u32 bmsr, dummy;
15574 tg3_readphy(tp, MII_BMSR, &bmsr);
15575 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15576 (bmsr & BMSR_LSTATUS))
15577 goto skip_phy_reset;
15579 err = tg3_phy_reset(tp);
15580 if (err)
15581 return err;
15583 tg3_phy_set_wirespeed(tp);
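/* If the stored advertisement does not match the requested link
 * configuration, reprogram it and restart autonegotiation.
 */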
15585 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15586 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15587 tp->link_config.flowctrl);
15589 tg3_writephy(tp, MII_BMCR,
15590 BMCR_ANENABLE | BMCR_ANRESTART);
15594 skip_phy_reset:
15595 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15596 err = tg3_init_5401phy_dsp(tp);
15597 if (err)
15598 return err;
15600 err = tg3_init_5401phy_dsp(tp);
15603 return err;
15606 static void tg3_read_vpd(struct tg3 *tp)
15608 u8 *vpd_data;
15609 unsigned int block_end, rosize, len;
15610 u32 vpdlen;
15611 int j, i = 0;
15613 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15614 if (!vpd_data)
15615 goto out_no_vpd;
15617 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15618 if (i < 0)
15619 goto out_not_found;
15621 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15622 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15623 i += PCI_VPD_LRDT_TAG_SIZE;
15625 if (block_end > vpdlen)
15626 goto out_not_found;
15628 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15629 PCI_VPD_RO_KEYWORD_MFR_ID);
15630 if (j > 0) {
15631 len = pci_vpd_info_field_size(&vpd_data[j]);
15633 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15634 if (j + len > block_end || len != 4 ||
15635 memcmp(&vpd_data[j], "1028", 4))
15636 goto partno;
15638 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15639 PCI_VPD_RO_KEYWORD_VENDOR0);
15640 if (j < 0)
15641 goto partno;
15643 len = pci_vpd_info_field_size(&vpd_data[j]);
15645 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15646 if (j + len > block_end)
15647 goto partno;
15649 if (len >= sizeof(tp->fw_ver))
15650 len = sizeof(tp->fw_ver) - 1;
15651 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15652 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15653 &vpd_data[j]);
15656 partno:
15657 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15658 PCI_VPD_RO_KEYWORD_PARTNO);
15659 if (i < 0)
15660 goto out_not_found;
15662 len = pci_vpd_info_field_size(&vpd_data[i]);
15664 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15665 if (len > TG3_BPN_SIZE ||
15666 (len + i) > vpdlen)
15667 goto out_not_found;
15669 memcpy(tp->board_part_number, &vpd_data[i], len);
15671 out_not_found:
15672 kfree(vpd_data);
15673 if (tp->board_part_number[0])
15674 return;
15676 out_no_vpd:
15677 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15678 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15679 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15680 strcpy(tp->board_part_number, "BCM5717");
15681 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15682 strcpy(tp->board_part_number, "BCM5718");
15683 else
15684 goto nomatch;
15685 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15686 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15687 strcpy(tp->board_part_number, "BCM57780");
15688 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15689 strcpy(tp->board_part_number, "BCM57760");
15690 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15691 strcpy(tp->board_part_number, "BCM57790");
15692 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15693 strcpy(tp->board_part_number, "BCM57788");
15694 else
15695 goto nomatch;
15696 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15697 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15698 strcpy(tp->board_part_number, "BCM57761");
15699 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15700 strcpy(tp->board_part_number, "BCM57765");
15701 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15702 strcpy(tp->board_part_number, "BCM57781");
15703 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15704 strcpy(tp->board_part_number, "BCM57785");
15705 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15706 strcpy(tp->board_part_number, "BCM57791");
15707 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15708 strcpy(tp->board_part_number, "BCM57795");
15709 else
15710 goto nomatch;
15711 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15712 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15713 strcpy(tp->board_part_number, "BCM57762");
15714 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15715 strcpy(tp->board_part_number, "BCM57766");
15716 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15717 strcpy(tp->board_part_number, "BCM57782");
15718 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15719 strcpy(tp->board_part_number, "BCM57786");
15720 else
15721 goto nomatch;
15722 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15723 strcpy(tp->board_part_number, "BCM95906");
15724 } else {
15725 nomatch:
15726 strcpy(tp->board_part_number, "none");
15730 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15731 {
15732 u32 val;
15734 if (tg3_nvram_read(tp, offset, &val) ||
15735 (val & 0xfc000000) != 0x0c000000 ||
15736 tg3_nvram_read(tp, offset + 4, &val) ||
15737 val != 0)
15738 return 0;
15740 return 1;
15741 }
15743 static void tg3_read_bc_ver(struct tg3 *tp)
15745 u32 val, offset, start, ver_offset;
15746 int i, dst_off;
15747 bool newver = false;
15749 if (tg3_nvram_read(tp, 0xc, &offset) ||
15750 tg3_nvram_read(tp, 0x4, &start))
15751 return;
15753 offset = tg3_nvram_logical_addr(tp, offset);
15755 if (tg3_nvram_read(tp, offset, &val))
15756 return;
15758 if ((val & 0xfc000000) == 0x0c000000) {
15759 if (tg3_nvram_read(tp, offset + 4, &val))
15760 return;
15762 if (val == 0)
15763 newver = true;
15766 dst_off = strlen(tp->fw_ver);
15768 if (newver) {
15769 if (TG3_VER_SIZE - dst_off < 16 ||
15770 tg3_nvram_read(tp, offset + 8, &ver_offset))
15771 return;
15773 offset = offset + ver_offset - start;
15774 for (i = 0; i < 16; i += 4) {
15775 __be32 v;
15776 if (tg3_nvram_read_be32(tp, offset + i, &v))
15777 return;
15779 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15781 } else {
15782 u32 major, minor;
15784 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15785 return;
15787 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15788 TG3_NVM_BCVER_MAJSFT;
15789 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15790 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15791 "v%d.%02d", major, minor);
15795 static void tg3_read_hwsb_ver(struct tg3 *tp)
15797 u32 val, major, minor;
15799 /* Use native endian representation */
15800 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15801 return;
15803 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15804 TG3_NVM_HWSB_CFG1_MAJSFT;
15805 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15806 TG3_NVM_HWSB_CFG1_MINSFT;
15808 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15811 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15813 u32 offset, major, minor, build;
15815 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15817 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15818 return;
15820 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15821 case TG3_EEPROM_SB_REVISION_0:
15822 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15823 break;
15824 case TG3_EEPROM_SB_REVISION_2:
15825 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15826 break;
15827 case TG3_EEPROM_SB_REVISION_3:
15828 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15829 break;
15830 case TG3_EEPROM_SB_REVISION_4:
15831 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15832 break;
15833 case TG3_EEPROM_SB_REVISION_5:
15834 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15835 break;
15836 case TG3_EEPROM_SB_REVISION_6:
15837 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15838 break;
15839 default:
15840 return;
15843 if (tg3_nvram_read(tp, offset, &val))
15844 return;
15846 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15847 TG3_EEPROM_SB_EDH_BLD_SHFT;
15848 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15849 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15850 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15852 if (minor > 99 || build > 26)
15853 return;
15855 offset = strlen(tp->fw_ver);
15856 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15857 " v%d.%02d", major, minor);
15859 if (build > 0) {
15860 offset = strlen(tp->fw_ver);
15861 if (offset < TG3_VER_SIZE - 1)
15862 tp->fw_ver[offset] = 'a' + build - 1;
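/* Builds 1-26 are rendered as a single suffix letter 'a'-'z',
 * which is why builds above 26 were rejected earlier.
 */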
15866 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15868 u32 val, offset, start;
15869 int i, vlen;
15871 for (offset = TG3_NVM_DIR_START;
15872 offset < TG3_NVM_DIR_END;
15873 offset += TG3_NVM_DIRENT_SIZE) {
15874 if (tg3_nvram_read(tp, offset, &val))
15875 return;
15877 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15878 break;
15881 if (offset == TG3_NVM_DIR_END)
15882 return;
15884 if (!tg3_flag(tp, 5705_PLUS))
15885 start = 0x08000000;
15886 else if (tg3_nvram_read(tp, offset - 4, &start))
15887 return;
15889 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15890 !tg3_fw_img_is_valid(tp, offset) ||
15891 tg3_nvram_read(tp, offset + 8, &val))
15892 return;
15894 offset += val - start;
15896 vlen = strlen(tp->fw_ver);
15898 tp->fw_ver[vlen++] = ',';
15899 tp->fw_ver[vlen++] = ' ';
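/* Append the 16-byte ASF firmware version four bytes at a time,
 * truncating if the string would overflow TG3_VER_SIZE.
 */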
15901 for (i = 0; i < 4; i++) {
15902 __be32 v;
15903 if (tg3_nvram_read_be32(tp, offset, &v))
15904 return;
15906 offset += sizeof(v);
15908 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15909 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15910 break;
15913 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15914 vlen += sizeof(v);
15918 static void tg3_probe_ncsi(struct tg3 *tp)
15920 u32 apedata;
15922 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15923 if (apedata != APE_SEG_SIG_MAGIC)
15924 return;
15926 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15927 if (!(apedata & APE_FW_STATUS_READY))
15928 return;
15930 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15931 tg3_flag_set(tp, APE_HAS_NCSI);
15934 static void tg3_read_dash_ver(struct tg3 *tp)
15936 int vlen;
15937 u32 apedata;
15938 char *fwtype;
15940 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15942 if (tg3_flag(tp, APE_HAS_NCSI))
15943 fwtype = "NCSI";
15944 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15945 fwtype = "SMASH";
15946 else
15947 fwtype = "DASH";
15949 vlen = strlen(tp->fw_ver);
15951 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15952 fwtype,
15953 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15954 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15955 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15956 (apedata & APE_FW_VERSION_BLDMSK));
15959 static void tg3_read_otp_ver(struct tg3 *tp)
15961 u32 val, val2;
15963 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15964 return;
15966 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15967 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15968 TG3_OTP_MAGIC0_VALID(val)) {
15969 u64 val64 = (u64) val << 32 | val2;
15970 u32 ver = 0;
15971 int i, vlen;
15973 for (i = 0; i < 7; i++) {
15974 if ((val64 & 0xff) == 0)
15975 break;
15976 ver = val64 & 0xff;
15977 val64 >>= 8;
15978 }
15979 vlen = strlen(tp->fw_ver);
15980 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15984 static void tg3_read_fw_ver(struct tg3 *tp)
15986 u32 val;
15987 bool vpd_vers = false;
15989 if (tp->fw_ver[0] != 0)
15990 vpd_vers = true;
15992 if (tg3_flag(tp, NO_NVRAM)) {
15993 strcat(tp->fw_ver, "sb");
15994 tg3_read_otp_ver(tp);
15995 return;
15998 if (tg3_nvram_read(tp, 0, &val))
15999 return;
16001 if (val == TG3_EEPROM_MAGIC)
16002 tg3_read_bc_ver(tp);
16003 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16004 tg3_read_sb_ver(tp, val);
16005 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16006 tg3_read_hwsb_ver(tp);
16008 if (tg3_flag(tp, ENABLE_ASF)) {
16009 if (tg3_flag(tp, ENABLE_APE)) {
16010 tg3_probe_ncsi(tp);
16011 if (!vpd_vers)
16012 tg3_read_dash_ver(tp);
16013 } else if (!vpd_vers) {
16014 tg3_read_mgmtfw_ver(tp);
16018 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16021 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16022 {
16023 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16024 return TG3_RX_RET_MAX_SIZE_5717;
16025 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16026 return TG3_RX_RET_MAX_SIZE_5700;
16027 else
16028 return TG3_RX_RET_MAX_SIZE_5705;
16029 }
16031 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16032 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16033 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16034 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16035 { },
16036 };
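/* The empty { } entry terminates the table; pci_dev_present()
 * scans it below when deciding whether to set MBOX_WRITE_REORDER.
 */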
16038 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16040 struct pci_dev *peer;
16041 unsigned int func, devnr = tp->pdev->devfn & ~7;
16043 for (func = 0; func < 8; func++) {
16044 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16045 if (peer && peer != tp->pdev)
16046 break;
16047 pci_dev_put(peer);
16049 /* 5704 can be configured in single-port mode, set peer to
16050 * tp->pdev in that case.
16051 */
16052 if (!peer) {
16053 peer = tp->pdev;
16054 return peer;
16055 }
16057 /*
16058 * We don't need to keep the refcount elevated; there's no way
16059 * to remove one half of this device without removing the other.
16060 */
16061 pci_dev_put(peer);
16063 return peer;
16066 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16068 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16069 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16070 u32 reg;
16072 /* All devices that use the alternate
16073 * ASIC REV location have a CPMU.
16074 */
16075 tg3_flag_set(tp, CPMU_PRESENT);
16077 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16078 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16079 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16080 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16081 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16082 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16083 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16084 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16085 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16086 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16087 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16088 reg = TG3PCI_GEN2_PRODID_ASICREV;
16089 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16090 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16091 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16092 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16093 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16094 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16095 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16096 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16097 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16098 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16099 reg = TG3PCI_GEN15_PRODID_ASICREV;
16100 else
16101 reg = TG3PCI_PRODID_ASICREV;
16103 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16106 /* Wrong chip ID in 5752 A0. This code can be removed later
16107 * as A0 is not in production.
16108 */
16109 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16110 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16112 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16113 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16115 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16116 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16117 tg3_asic_rev(tp) == ASIC_REV_5720)
16118 tg3_flag_set(tp, 5717_PLUS);
16120 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16121 tg3_asic_rev(tp) == ASIC_REV_57766)
16122 tg3_flag_set(tp, 57765_CLASS);
16124 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16125 tg3_asic_rev(tp) == ASIC_REV_5762)
16126 tg3_flag_set(tp, 57765_PLUS);
16128 /* Intentionally exclude ASIC_REV_5906 */
16129 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16130 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16131 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16132 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16133 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16134 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16135 tg3_flag(tp, 57765_PLUS))
16136 tg3_flag_set(tp, 5755_PLUS);
16138 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16139 tg3_asic_rev(tp) == ASIC_REV_5714)
16140 tg3_flag_set(tp, 5780_CLASS);
16142 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16143 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16144 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16145 tg3_flag(tp, 5755_PLUS) ||
16146 tg3_flag(tp, 5780_CLASS))
16147 tg3_flag_set(tp, 5750_PLUS);
16149 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16150 tg3_flag(tp, 5750_PLUS))
16151 tg3_flag_set(tp, 5705_PLUS);
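/* The generation flags set above are cumulative: 5717_PLUS
 * implies 57765_PLUS, which implies 5755_PLUS, which implies
 * 5750_PLUS, which in turn implies 5705_PLUS.
 */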
16154 static bool tg3_10_100_only_device(struct tg3 *tp,
16155 const struct pci_device_id *ent)
16157 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16159 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16160 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16161 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16162 return true;
16164 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16165 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16166 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16167 return true;
16168 } else {
16169 return true;
16173 return false;
16176 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16178 u32 misc_ctrl_reg;
16179 u32 pci_state_reg, grc_misc_cfg;
16180 u32 val;
16181 u16 pci_cmd;
16182 int err;
16184 /* Force memory write invalidate off. If we leave it on,
16185 * then on 5700_BX chips we have to enable a workaround.
16186 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16187 * to match the cacheline size. The Broadcom driver has this
16188 * workaround but turns MWI off all the time, so it never uses
16189 * it. This suggests that the workaround is insufficient.
16190 */
16191 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16192 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16193 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16195 /* Important! -- Make sure register accesses are byteswapped
16196 * correctly. Also, for those chips that require it, make
16197 * sure that indirect register accesses are enabled before
16198 * the first operation.
16199 */
16200 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16201 &misc_ctrl_reg);
16202 tp->misc_host_ctrl |= (misc_ctrl_reg &
16203 MISC_HOST_CTRL_CHIPREV);
16204 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16205 tp->misc_host_ctrl);
16207 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16209 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16210 * we need to disable memory and use config. cycles
16211 * only to access all registers. The 5702/03 chips
16212 * can mistakenly decode the special cycles from the
16213 * ICH chipsets as memory write cycles, causing corruption
16214 * of register and memory space. Only certain ICH bridges
16215 * will drive special cycles with non-zero data during the
16216 * address phase which can fall within the 5703's address
16217 * range. This is not an ICH bug as the PCI spec allows
16218 * non-zero address during special cycles. However, only
16219 * these ICH bridges are known to drive non-zero addresses
16220 * during special cycles.
16222 * Since special cycles do not cross PCI bridges, we only
16223 * enable this workaround if the 5703 is on the secondary
16224 * bus of these ICH bridges.
16225 */
16226 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16227 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16228 static struct tg3_dev_id {
16229 u32 vendor;
16230 u32 device;
16231 u32 rev;
16232 } ich_chipsets[] = {
16233 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16234 PCI_ANY_ID },
16235 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16236 PCI_ANY_ID },
16237 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16238 0xa },
16239 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16240 PCI_ANY_ID },
16241 { },
16243 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16244 struct pci_dev *bridge = NULL;
16246 while (pci_id->vendor != 0) {
16247 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16248 bridge);
16249 if (!bridge) {
16250 pci_id++;
16251 continue;
16253 if (pci_id->rev != PCI_ANY_ID) {
16254 if (bridge->revision > pci_id->rev)
16255 continue;
16257 if (bridge->subordinate &&
16258 (bridge->subordinate->number ==
16259 tp->pdev->bus->number)) {
16260 tg3_flag_set(tp, ICH_WORKAROUND);
16261 pci_dev_put(bridge);
16262 break;
16267 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16268 static struct tg3_dev_id {
16269 u32 vendor;
16270 u32 device;
16271 } bridge_chipsets[] = {
16272 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16273 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16274 { },
16276 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16277 struct pci_dev *bridge = NULL;
16279 while (pci_id->vendor != 0) {
16280 bridge = pci_get_device(pci_id->vendor,
16281 pci_id->device,
16282 bridge);
16283 if (!bridge) {
16284 pci_id++;
16285 continue;
16287 if (bridge->subordinate &&
16288 (bridge->subordinate->number <=
16289 tp->pdev->bus->number) &&
16290 (bridge->subordinate->busn_res.end >=
16291 tp->pdev->bus->number)) {
16292 tg3_flag_set(tp, 5701_DMA_BUG);
16293 pci_dev_put(bridge);
16294 break;
16299 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16300 * DMA addresses > 40-bit. This bridge may have other additional
16301 * 57xx devices behind it in some 4-port NIC designs for example.
16302 * Any tg3 device found behind the bridge will also need the 40-bit
16303 * DMA workaround.
16304 */
16305 if (tg3_flag(tp, 5780_CLASS)) {
16306 tg3_flag_set(tp, 40BIT_DMA_BUG);
16307 tp->msi_cap = tp->pdev->msi_cap;
16308 } else {
16309 struct pci_dev *bridge = NULL;
16311 do {
16312 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16313 PCI_DEVICE_ID_SERVERWORKS_EPB,
16314 bridge);
16315 if (bridge && bridge->subordinate &&
16316 (bridge->subordinate->number <=
16317 tp->pdev->bus->number) &&
16318 (bridge->subordinate->busn_res.end >=
16319 tp->pdev->bus->number)) {
16320 tg3_flag_set(tp, 40BIT_DMA_BUG);
16321 pci_dev_put(bridge);
16322 break;
16324 } while (bridge);
16327 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16328 tg3_asic_rev(tp) == ASIC_REV_5714)
16329 tp->pdev_peer = tg3_find_peer(tp);
16331 /* Determine TSO capabilities */
16332 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16333 ; /* Do nothing. HW bug. */
16334 else if (tg3_flag(tp, 57765_PLUS))
16335 tg3_flag_set(tp, HW_TSO_3);
16336 else if (tg3_flag(tp, 5755_PLUS) ||
16337 tg3_asic_rev(tp) == ASIC_REV_5906)
16338 tg3_flag_set(tp, HW_TSO_2);
16339 else if (tg3_flag(tp, 5750_PLUS)) {
16340 tg3_flag_set(tp, HW_TSO_1);
16341 tg3_flag_set(tp, TSO_BUG);
16342 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16343 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16344 tg3_flag_clear(tp, TSO_BUG);
16345 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16346 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16347 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16348 tg3_flag_set(tp, FW_TSO);
16349 tg3_flag_set(tp, TSO_BUG);
16350 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16351 tp->fw_needed = FIRMWARE_TG3TSO5;
16352 else
16353 tp->fw_needed = FIRMWARE_TG3TSO;
16356 /* Selectively allow TSO based on operating conditions */
16357 if (tg3_flag(tp, HW_TSO_1) ||
16358 tg3_flag(tp, HW_TSO_2) ||
16359 tg3_flag(tp, HW_TSO_3) ||
16360 tg3_flag(tp, FW_TSO)) {
16361 /* For firmware TSO, assume ASF is disabled.
16362 * We'll disable TSO later if we discover ASF
16363 * is enabled in tg3_get_eeprom_hw_cfg().
16364 */
16365 tg3_flag_set(tp, TSO_CAPABLE);
16366 } else {
16367 tg3_flag_clear(tp, TSO_CAPABLE);
16368 tg3_flag_clear(tp, TSO_BUG);
16369 tp->fw_needed = NULL;
16372 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16373 tp->fw_needed = FIRMWARE_TG3;
16375 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16376 tp->fw_needed = FIRMWARE_TG357766;
16378 tp->irq_max = 1;
16380 if (tg3_flag(tp, 5750_PLUS)) {
16381 tg3_flag_set(tp, SUPPORT_MSI);
16382 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16383 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16384 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16385 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16386 tp->pdev_peer == tp->pdev))
16387 tg3_flag_clear(tp, SUPPORT_MSI);
16389 if (tg3_flag(tp, 5755_PLUS) ||
16390 tg3_asic_rev(tp) == ASIC_REV_5906) {
16391 tg3_flag_set(tp, 1SHOT_MSI);
16394 if (tg3_flag(tp, 57765_PLUS)) {
16395 tg3_flag_set(tp, SUPPORT_MSIX);
16396 tp->irq_max = TG3_IRQ_MAX_VECS;
16400 tp->txq_max = 1;
16401 tp->rxq_max = 1;
16402 if (tp->irq_max > 1) {
16403 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16404 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16406 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16407 tg3_asic_rev(tp) == ASIC_REV_5720)
16408 tp->txq_max = tp->irq_max - 1;
16411 if (tg3_flag(tp, 5755_PLUS) ||
16412 tg3_asic_rev(tp) == ASIC_REV_5906)
16413 tg3_flag_set(tp, SHORT_DMA_BUG);
16415 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16416 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16418 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16419 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16420 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16421 tg3_asic_rev(tp) == ASIC_REV_5762)
16422 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16424 if (tg3_flag(tp, 57765_PLUS) &&
16425 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16426 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16428 if (!tg3_flag(tp, 5705_PLUS) ||
16429 tg3_flag(tp, 5780_CLASS) ||
16430 tg3_flag(tp, USE_JUMBO_BDFLAG))
16431 tg3_flag_set(tp, JUMBO_CAPABLE);
16433 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16434 &pci_state_reg);
16436 if (pci_is_pcie(tp->pdev)) {
16437 u16 lnkctl;
16439 tg3_flag_set(tp, PCI_EXPRESS);
16441 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16442 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16443 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16444 tg3_flag_clear(tp, HW_TSO_2);
16445 tg3_flag_clear(tp, TSO_CAPABLE);
16447 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16448 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16449 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16450 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16451 tg3_flag_set(tp, CLKREQ_BUG);
16452 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16453 tg3_flag_set(tp, L1PLLPD_EN);
16455 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16456 /* BCM5785 devices are effectively PCIe devices, and should
16457 * follow PCIe codepaths, but do not have a PCIe capabilities
16458 * section.
16459 */
16460 tg3_flag_set(tp, PCI_EXPRESS);
16461 } else if (!tg3_flag(tp, 5705_PLUS) ||
16462 tg3_flag(tp, 5780_CLASS)) {
16463 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16464 if (!tp->pcix_cap) {
16465 dev_err(&tp->pdev->dev,
16466 "Cannot find PCI-X capability, aborting\n");
16467 return -EIO;
16470 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16471 tg3_flag_set(tp, PCIX_MODE);
16474 /* If we have an AMD 762 or VIA K8T800 chipset, write
16475 * reordering to the mailbox registers done by the host
16476 * controller can cause major troubles. We read back from
16477 * every mailbox register write to force the writes to be
16478 * posted to the chip in order.
16479 */
16480 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16481 !tg3_flag(tp, PCI_EXPRESS))
16482 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16484 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16485 &tp->pci_cacheline_sz);
16486 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16487 &tp->pci_lat_timer);
16488 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16489 tp->pci_lat_timer < 64) {
16490 tp->pci_lat_timer = 64;
16491 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16492 tp->pci_lat_timer);
16495 /* Important! -- It is critical that the PCI-X hw workaround
16496 * situation is decided before the first MMIO register access.
16497 */
16498 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16499 /* 5700 BX chips need to have their TX producer index
16500 * mailboxes written twice to work around a bug.
16501 */
16502 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16504 /* If we are in PCI-X mode, enable register write workaround.
16506 * The workaround is to use indirect register accesses
16507 * for all chip writes not to mailbox registers.
16508 */
16509 if (tg3_flag(tp, PCIX_MODE)) {
16510 u32 pm_reg;
16512 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16514 /* The chip can have its power management PCI config
16515 * space registers clobbered due to this bug.
16516 * So explicitly force the chip into D0 here.
16517 */
16518 pci_read_config_dword(tp->pdev,
16519 tp->pdev->pm_cap + PCI_PM_CTRL,
16520 &pm_reg);
16521 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16522 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16523 pci_write_config_dword(tp->pdev,
16524 tp->pdev->pm_cap + PCI_PM_CTRL,
16525 pm_reg);
16527 /* Also, force SERR#/PERR# in PCI command. */
16528 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16529 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16530 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16534 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16535 tg3_flag_set(tp, PCI_HIGH_SPEED);
16536 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16537 tg3_flag_set(tp, PCI_32BIT);
16539 /* Chip-specific fixup from Broadcom driver */
16540 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16541 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16542 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16543 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16546 /* Default fast path register access methods */
16547 tp->read32 = tg3_read32;
16548 tp->write32 = tg3_write32;
16549 tp->read32_mbox = tg3_read32;
16550 tp->write32_mbox = tg3_write32;
16551 tp->write32_tx_mbox = tg3_write32;
16552 tp->write32_rx_mbox = tg3_write32;
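/* The defaults above use plain MMIO accessors; the cases below
 * swap in flushed or indirect variants when a chip bug or host
 * bridge quirk requires it.
 */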
16554 /* Various workaround register access methods */
16555 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16556 tp->write32 = tg3_write_indirect_reg32;
16557 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16558 (tg3_flag(tp, PCI_EXPRESS) &&
16559 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16560 /*
16561 * Back to back register writes can cause problems on these
16562 * chips, the workaround is to read back all reg writes
16563 * except those to mailbox regs.
16565 * See tg3_write_indirect_reg32().
16566 */
16567 tp->write32 = tg3_write_flush_reg32;
16570 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16571 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16572 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16573 tp->write32_rx_mbox = tg3_write_flush_reg32;
16576 if (tg3_flag(tp, ICH_WORKAROUND)) {
16577 tp->read32 = tg3_read_indirect_reg32;
16578 tp->write32 = tg3_write_indirect_reg32;
16579 tp->read32_mbox = tg3_read_indirect_mbox;
16580 tp->write32_mbox = tg3_write_indirect_mbox;
16581 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16582 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16584 iounmap(tp->regs);
16585 tp->regs = NULL;
16587 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16588 pci_cmd &= ~PCI_COMMAND_MEMORY;
16589 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16591 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16592 tp->read32_mbox = tg3_read32_mbox_5906;
16593 tp->write32_mbox = tg3_write32_mbox_5906;
16594 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16595 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16598 if (tp->write32 == tg3_write_indirect_reg32 ||
16599 (tg3_flag(tp, PCIX_MODE) &&
16600 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16601 tg3_asic_rev(tp) == ASIC_REV_5701)))
16602 tg3_flag_set(tp, SRAM_USE_CONFIG);
16604 /* The memory arbiter has to be enabled in order for SRAM accesses
16605 * to succeed. Normally on powerup the tg3 chip firmware will make
16606 * sure it is enabled, but other entities such as system netboot
16607 * code might disable it.
16608 */
16609 val = tr32(MEMARB_MODE);
16610 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16612 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16613 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16614 tg3_flag(tp, 5780_CLASS)) {
16615 if (tg3_flag(tp, PCIX_MODE)) {
16616 pci_read_config_dword(tp->pdev,
16617 tp->pcix_cap + PCI_X_STATUS,
16618 &val);
16619 tp->pci_fn = val & 0x7;
16621 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16622 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16623 tg3_asic_rev(tp) == ASIC_REV_5720) {
16624 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16625 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16626 val = tr32(TG3_CPMU_STATUS);
16628 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16629 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16630 else
16631 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16632 TG3_CPMU_STATUS_FSHFT_5719;
16635 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16636 tp->write32_tx_mbox = tg3_write_flush_reg32;
16637 tp->write32_rx_mbox = tg3_write_flush_reg32;
16640 /* Get eeprom hw config before calling tg3_set_power_state().
16641 * In particular, the TG3_FLAG_IS_NIC flag must be
16642 * determined before calling tg3_set_power_state() so that
16643 * we know whether or not to switch out of Vaux power.
16644 * When the flag is set, it means that GPIO1 is used for eeprom
16645 * write protect and also implies that it is a LOM where GPIOs
16646 * are not used to switch power.
16647 */
16648 tg3_get_eeprom_hw_cfg(tp);
16650 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16651 tg3_flag_clear(tp, TSO_CAPABLE);
16652 tg3_flag_clear(tp, TSO_BUG);
16653 tp->fw_needed = NULL;
16656 if (tg3_flag(tp, ENABLE_APE)) {
16657 /* Allow reads and writes to the
16658 * APE register and memory space.
16659 */
16660 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16661 PCISTATE_ALLOW_APE_SHMEM_WR |
16662 PCISTATE_ALLOW_APE_PSPACE_WR;
16663 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16664 pci_state_reg);
16666 tg3_ape_lock_init(tp);
16667 tp->ape_hb_interval =
16668 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16671 /* Set up tp->grc_local_ctrl before calling
16672 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16673 * will bring 5700's external PHY out of reset.
16674 * It is also used as eeprom write protect on LOMs.
16675 */
16676 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16677 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16678 tg3_flag(tp, EEPROM_WRITE_PROT))
16679 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16680 GRC_LCLCTRL_GPIO_OUTPUT1);
16681 /* Unused GPIO3 must be driven as output on 5752 because there
16682 * are no pull-up resistors on unused GPIO pins.
16683 */
16684 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16685 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16687 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16688 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16689 tg3_flag(tp, 57765_CLASS))
16690 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16692 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16693 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16694 /* Turn off the debug UART. */
16695 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16696 if (tg3_flag(tp, IS_NIC))
16697 /* Keep VMain power. */
16698 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16699 GRC_LCLCTRL_GPIO_OUTPUT0;
16702 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16703 tp->grc_local_ctrl |=
16704 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16706 /* Switch out of Vaux if it is a NIC */
16707 tg3_pwrsrc_switch_to_vmain(tp);
16709 /* Derive initial jumbo mode from MTU assigned in
16710 * ether_setup() via the alloc_etherdev() call
16711 */
16712 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16713 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16715 /* Determine WakeOnLan speed to use. */
16716 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16717 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16718 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16719 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16720 tg3_flag_clear(tp, WOL_SPEED_100MB);
16721 } else {
16722 tg3_flag_set(tp, WOL_SPEED_100MB);
16725 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16726 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16728 /* A few boards don't want Ethernet@WireSpeed phy feature */
16729 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16730 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16731 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16732 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16733 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16734 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16735 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16737 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16738 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16739 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16740 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16741 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16743 if (tg3_flag(tp, 5705_PLUS) &&
16744 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16745 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16746 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16747 !tg3_flag(tp, 57765_PLUS)) {
16748 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16749 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16750 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16751 tg3_asic_rev(tp) == ASIC_REV_5761) {
16752 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16753 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16754 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16755 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16756 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16757 } else
16758 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16761 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16762 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16763 tp->phy_otp = tg3_read_otp_phycfg(tp);
16764 if (tp->phy_otp == 0)
16765 tp->phy_otp = TG3_OTP_DEFAULT;
16768 if (tg3_flag(tp, CPMU_PRESENT))
16769 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16770 else
16771 tp->mi_mode = MAC_MI_MODE_BASE;
16773 tp->coalesce_mode = 0;
16774 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16775 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16776 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16778 /* Set these bits to enable statistics workaround. */
16779 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16780 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16781 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16782 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16783 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16784 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16787 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16788 tg3_asic_rev(tp) == ASIC_REV_57780)
16789 tg3_flag_set(tp, USE_PHYLIB);
16791 err = tg3_mdio_init(tp);
16792 if (err)
16793 return err;
16795 /* Initialize data/descriptor byte/word swapping. */
16796 val = tr32(GRC_MODE);
16797 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16798 tg3_asic_rev(tp) == ASIC_REV_5762)
16799 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16800 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16801 GRC_MODE_B2HRX_ENABLE |
16802 GRC_MODE_HTX2B_ENABLE |
16803 GRC_MODE_HOST_STACKUP);
16804 else
16805 val &= GRC_MODE_HOST_STACKUP;
16807 tw32(GRC_MODE, val | tp->grc_mode);
16809 tg3_switch_clocks(tp);
16811 /* Clear this out for sanity. */
16812 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16814 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16815 tw32(TG3PCI_REG_BASE_ADDR, 0);
16817 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16818 &pci_state_reg);
16819 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16820 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16821 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16822 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16823 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16824 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16825 void __iomem *sram_base;
16827 /* Write some dummy words into the SRAM status block
16828 * area, see if it reads back correctly. If the return
16829 * value is bad, force enable the PCIX workaround.
16830 */
16831 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16833 writel(0x00000000, sram_base);
16834 writel(0x00000000, sram_base + 4);
16835 writel(0xffffffff, sram_base + 4);
16836 if (readl(sram_base) != 0x00000000)
16837 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16841 udelay(50);
16842 tg3_nvram_init(tp);
16844 /* If the device has an NVRAM, no need to load patch firmware */
16845 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16846 !tg3_flag(tp, NO_NVRAM))
16847 tp->fw_needed = NULL;
16849 grc_misc_cfg = tr32(GRC_MISC_CFG);
16850 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16852 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16853 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16854 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16855 tg3_flag_set(tp, IS_5788);
16857 if (!tg3_flag(tp, IS_5788) &&
16858 tg3_asic_rev(tp) != ASIC_REV_5700)
16859 tg3_flag_set(tp, TAGGED_STATUS);
16860 if (tg3_flag(tp, TAGGED_STATUS)) {
16861 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16862 HOSTCC_MODE_CLRTICK_TXBD);
16864 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16865 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16866 tp->misc_host_ctrl);
16869 /* Preserve the APE MAC_MODE bits */
16870 if (tg3_flag(tp, ENABLE_APE))
16871 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16872 else
16873 tp->mac_mode = 0;
16875 if (tg3_10_100_only_device(tp, ent))
16876 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16878 err = tg3_phy_probe(tp);
16879 if (err) {
16880 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16881 /* ... but do not return immediately ... */
16882 tg3_mdio_fini(tp);
16885 tg3_read_vpd(tp);
16886 tg3_read_fw_ver(tp);
16888 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16889 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16890 } else {
16891 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16892 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16893 else
16894 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16897 /* 5700 {AX,BX} chips have a broken status block link
16898 * change bit implementation, so we must use the
16899 * status register in those cases.
16900 */
16901 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16902 tg3_flag_set(tp, USE_LINKCHG_REG);
16903 else
16904 tg3_flag_clear(tp, USE_LINKCHG_REG);
16906 /* The led_ctrl is set during tg3_phy_probe; here we might
16907 * have to force the link status polling mechanism based
16908 * upon subsystem IDs.
16909 */
16910 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16911 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16912 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16913 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16914 tg3_flag_set(tp, USE_LINKCHG_REG);
16917 /* For all SERDES we poll the MAC status register. */
16918 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16919 tg3_flag_set(tp, POLL_SERDES);
16920 else
16921 tg3_flag_clear(tp, POLL_SERDES);
16923 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16924 tg3_flag_set(tp, POLL_CPMU_LINK);
16926 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16927 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
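/* The 5701 in PCI-X mode reportedly cannot DMA into buffers at a
 * 2-byte offset, so the NET_IP_ALIGN pad is dropped below; on CPUs
 * without efficient unaligned access, rx_copy_thresh is maxed out
 * so that every packet is copied to an aligned buffer instead.
 */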
16928 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16929 tg3_flag(tp, PCIX_MODE)) {
16930 tp->rx_offset = NET_SKB_PAD;
16931 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16932 tp->rx_copy_thresh = ~(u16)0;
16933 #endif
16936 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16937 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16938 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16940 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16942 /* Increment the rx prod index on the rx std ring by at most
16943 * 8 for these chips to work around hw errata.
16944 */
16945 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16946 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16947 tg3_asic_rev(tp) == ASIC_REV_5755)
16948 tp->rx_std_max_post = 8;
16950 if (tg3_flag(tp, ASPM_WORKAROUND))
16951 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16952 PCIE_PWR_MGMT_L1_THRESH_MSK;
16954 return err;
16957 #ifdef CONFIG_SPARC
16958 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16960 struct net_device *dev = tp->dev;
16961 struct pci_dev *pdev = tp->pdev;
16962 struct device_node *dp = pci_device_to_OF_node(pdev);
16963 const unsigned char *addr;
16964 int len;
16966 addr = of_get_property(dp, "local-mac-address", &len);
16967 if (addr && len == ETH_ALEN) {
16968 memcpy(dev->dev_addr, addr, ETH_ALEN);
16969 return 0;
16971 return -ENODEV;
16974 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16976 struct net_device *dev = tp->dev;
16978 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16979 return 0;
16981 #endif
16983 static int tg3_get_device_address(struct tg3 *tp)
16985 struct net_device *dev = tp->dev;
16986 u32 hi, lo, mac_offset;
16987 int addr_ok = 0;
16988 int err;
16990 #ifdef CONFIG_SPARC
16991 if (!tg3_get_macaddr_sparc(tp))
16992 return 0;
16993 #endif
16995 if (tg3_flag(tp, IS_SSB_CORE)) {
16996 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16997 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16998 return 0;
17001 mac_offset = 0x7c;
17002 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17003 tg3_flag(tp, 5780_CLASS)) {
17004 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17005 mac_offset = 0xcc;
17006 if (tg3_nvram_lock(tp))
17007 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17008 else
17009 tg3_nvram_unlock(tp);
17010 } else if (tg3_flag(tp, 5717_PLUS)) {
17011 if (tp->pci_fn & 1)
17012 mac_offset = 0xcc;
17013 if (tp->pci_fn > 1)
17014 mac_offset += 0x18c;
17015 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17016 mac_offset = 0x10;
17018 /* First try to get it from MAC address mailbox. */
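/* 0x484b is ASCII "HK", presumably the bootcode's signature that a
 * valid MAC address follows in SRAM.
 */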
17019 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17020 if ((hi >> 16) == 0x484b) {
17021 dev->dev_addr[0] = (hi >> 8) & 0xff;
17022 dev->dev_addr[1] = (hi >> 0) & 0xff;
17024 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17025 dev->dev_addr[2] = (lo >> 24) & 0xff;
17026 dev->dev_addr[3] = (lo >> 16) & 0xff;
17027 dev->dev_addr[4] = (lo >> 8) & 0xff;
17028 dev->dev_addr[5] = (lo >> 0) & 0xff;
17030 /* Some old bootcode may report a 0 MAC address in SRAM */
17031 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17033 if (!addr_ok) {
17034 /* Next, try NVRAM. */
17035 if (!tg3_flag(tp, NO_NVRAM) &&
17036 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17037 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17038 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17039 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17041 /* Finally just fetch it out of the MAC control regs. */
17042 else {
17043 hi = tr32(MAC_ADDR_0_HIGH);
17044 lo = tr32(MAC_ADDR_0_LOW);
17046 dev->dev_addr[5] = lo & 0xff;
17047 dev->dev_addr[4] = (lo >> 8) & 0xff;
17048 dev->dev_addr[3] = (lo >> 16) & 0xff;
17049 dev->dev_addr[2] = (lo >> 24) & 0xff;
17050 dev->dev_addr[1] = hi & 0xff;
17051 dev->dev_addr[0] = (hi >> 8) & 0xff;
17055 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17056 #ifdef CONFIG_SPARC
17057 if (!tg3_get_default_macaddr_sparc(tp))
17058 return 0;
17059 #endif
17060 return -EINVAL;
17062 return 0;
17065 #define BOUNDARY_SINGLE_CACHELINE 1
17066 #define BOUNDARY_MULTI_CACHELINE 2
17068 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17070 int cacheline_size;
17071 u8 byte;
17072 int goal;
17074 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
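/* PCI_CACHE_LINE_SIZE is specified in 32-bit words, hence the
 * multiply by 4; zero means firmware never programmed it, so a
 * 1024-byte cacheline is assumed.
 */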
17075 if (byte == 0)
17076 cacheline_size = 1024;
17077 else
17078 cacheline_size = (int) byte * 4;
17080 /* On 5703 and later chips, the boundary bits have no
17081 * effect.
17082 */
17083 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17084 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17085 !tg3_flag(tp, PCI_EXPRESS))
17086 goto out;
17088 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17089 goal = BOUNDARY_MULTI_CACHELINE;
17090 #else
17091 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17092 goal = BOUNDARY_SINGLE_CACHELINE;
17093 #else
17094 goal = 0;
17095 #endif
17096 #endif
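/* A goal of 0 means this platform has no boundary preference: most
 * chips then keep their default DMA boundary bits, while 57765+
 * parts disable cache alignment entirely.
 */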
17098 if (tg3_flag(tp, 57765_PLUS)) {
17099 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17100 goto out;
17103 if (!goal)
17104 goto out;
17106 /* PCI controllers on most RISC systems tend to disconnect
17107 * when a device tries to burst across a cache-line boundary.
17108 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17110 * Unfortunately, for PCI-E there are only limited
17111 * write-side controls for this, and thus for reads
17112 * we will still get the disconnects. We'll also waste
17113 * these PCI cycles for both read and write for chips
17114 * other than 5700 and 5701 which do not implement the
17115 * boundary bits.
17116 */
17117 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17118 switch (cacheline_size) {
17119 case 16:
17120 case 32:
17121 case 64:
17122 case 128:
17123 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17124 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17125 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17126 } else {
17127 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17128 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17130 break;
17132 case 256:
17133 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17134 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17135 break;
17137 default:
17138 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17139 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17140 break;
17142 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17143 switch (cacheline_size) {
17144 case 16:
17145 case 32:
17146 case 64:
17147 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17148 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17149 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17150 break;
17152 /* fallthrough */
17153 case 128:
17154 default:
17155 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17156 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17157 break;
17159 } else {
17160 switch (cacheline_size) {
17161 case 16:
17162 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17163 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17164 DMA_RWCTRL_WRITE_BNDRY_16);
17165 break;
17167 /* fallthrough */
17168 case 32:
17169 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17170 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17171 DMA_RWCTRL_WRITE_BNDRY_32);
17172 break;
17174 /* fallthrough */
17175 case 64:
17176 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17177 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17178 DMA_RWCTRL_WRITE_BNDRY_64);
17179 break;
17181 /* fallthrough */
17182 case 128:
17183 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17184 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17185 DMA_RWCTRL_WRITE_BNDRY_128);
17186 break;
17188 /* fallthrough */
17189 case 256:
17190 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17191 DMA_RWCTRL_WRITE_BNDRY_256);
17192 break;
17193 case 512:
17194 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17195 DMA_RWCTRL_WRITE_BNDRY_512);
17196 break;
17197 case 1024:
17198 default:
17199 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17200 DMA_RWCTRL_WRITE_BNDRY_1024);
17201 break;
17205 out:
17206 return val;
17209 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17210 int size, bool to_device)
17212 struct tg3_internal_buffer_desc test_desc;
17213 u32 sram_dma_descs;
17214 int i, ret;
17216 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17218 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17219 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17220 tw32(RDMAC_STATUS, 0);
17221 tw32(WDMAC_STATUS, 0);
17223 tw32(BUFMGR_MODE, 0);
17224 tw32(FTQ_RESET, 0);
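/* This test drives the chip's read/write DMA engines directly: a
 * hand-built internal descriptor is placed in NIC SRAM and enqueued
 * through the FTQ, bypassing the normal ring machinery.
 */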
17226 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17227 test_desc.addr_lo = buf_dma & 0xffffffff;
17228 test_desc.nic_mbuf = 0x00002100;
17229 test_desc.len = size;
17231 /*
17232 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17233 * the *second* time the tg3 driver was getting loaded after an
17234 * initial scan.
17236 * Broadcom tells me:
17237 * ...the DMA engine is connected to the GRC block and a DMA
17238 * reset may affect the GRC block in some unpredictable way...
17239 * The behavior of resets to individual blocks has not been tested.
17241 * Broadcom noted the GRC reset will also reset all sub-components.
17242 */
17243 if (to_device) {
17244 test_desc.cqid_sqid = (13 << 8) | 2;
17246 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17247 udelay(40);
17248 } else {
17249 test_desc.cqid_sqid = (16 << 8) | 7;
17251 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17252 udelay(40);
17254 test_desc.flags = 0x00000005;
17256 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17257 u32 val;
17259 val = *(((u32 *)&test_desc) + i);
17260 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17261 sram_dma_descs + (i * sizeof(u32)));
17262 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
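/* All descriptor words have now gone through the SRAM memory
 * window; reset the window base before starting the transfer.
 */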
17264 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17266 if (to_device)
17267 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17268 else
17269 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17271 ret = -ENODEV;
17272 for (i = 0; i < 40; i++) {
17273 u32 val;
17275 if (to_device)
17276 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17277 else
17278 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17279 if ((val & 0xffff) == sram_dma_descs) {
17280 ret = 0;
17281 break;
17284 udelay(100);
17287 return ret;
17290 #define TEST_BUFFER_SIZE 0x2000
17292 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17293 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17294 { },
17297 static int tg3_test_dma(struct tg3 *tp)
17299 dma_addr_t buf_dma;
17300 u32 *buf, saved_dma_rwctrl;
17301 int ret = 0;
17303 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17304 &buf_dma, GFP_KERNEL);
17305 if (!buf) {
17306 ret = -ENOMEM;
17307 goto out_nofree;
17310 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17311 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17313 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17315 if (tg3_flag(tp, 57765_PLUS))
17316 goto out;
17318 if (tg3_flag(tp, PCI_EXPRESS)) {
17319 /* DMA read watermark not used on PCIE */
17320 tp->dma_rwctrl |= 0x00180000;
17321 } else if (!tg3_flag(tp, PCIX_MODE)) {
17322 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17323 tg3_asic_rev(tp) == ASIC_REV_5750)
17324 tp->dma_rwctrl |= 0x003f0000;
17325 else
17326 tp->dma_rwctrl |= 0x003f000f;
17327 } else {
17328 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17329 tg3_asic_rev(tp) == ASIC_REV_5704) {
17330 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17331 u32 read_water = 0x7;
17333 /* If the 5704 is behind the EPB bridge, we can
17334 * do the less restrictive ONE_DMA workaround for
17335 * better performance.
17336 */
17337 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17338 tg3_asic_rev(tp) == ASIC_REV_5704)
17339 tp->dma_rwctrl |= 0x8000;
17340 else if (ccval == 0x6 || ccval == 0x7)
17341 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17343 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17344 read_water = 4;
17345 /* Set bit 23 to enable PCIX hw bug fix */
17346 tp->dma_rwctrl |=
17347 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17348 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17349 (1 << 23);
17350 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17351 /* 5780 always in PCIX mode */
17352 tp->dma_rwctrl |= 0x00144000;
17353 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17354 /* 5714 always in PCIX mode */
17355 tp->dma_rwctrl |= 0x00148000;
17356 } else {
17357 tp->dma_rwctrl |= 0x001b000f;
17360 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17361 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17363 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17364 tg3_asic_rev(tp) == ASIC_REV_5704)
17365 tp->dma_rwctrl &= 0xfffffff0;
17367 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17368 tg3_asic_rev(tp) == ASIC_REV_5701) {
17369 /* Remove this if it causes problems for some boards. */
17370 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17372 /* On 5700/5701 chips, we need to set this bit.
17373 * Otherwise the chip will issue cacheline transactions
17374 * to streamable DMA memory with not all the byte
17375 * enables turned on. This is an error on several
17376 * RISC PCI controllers, in particular sparc64.
17378 * On 5703/5704 chips, this bit has been reassigned
17379 * a different meaning. In particular, it is used
17380 * on those chips to enable a PCI-X workaround.
17381 */
17382 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17385 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17388 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17389 tg3_asic_rev(tp) != ASIC_REV_5701)
17390 goto out;
17392 /* It is best to perform DMA test with maximum write burst size
17393 * to expose the 5700/5701 write DMA bug.
17394 */
17395 saved_dma_rwctrl = tp->dma_rwctrl;
17396 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17397 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17399 while (1) {
17400 u32 *p = buf, i;
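/* Fill the buffer with a known ascending pattern so any corrupted
 * word is easy to spot on read-back.
 */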
17402 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17403 p[i] = i;
17405 /* Send the buffer to the chip. */
17406 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17407 if (ret) {
17408 dev_err(&tp->pdev->dev,
17409 "%s: Buffer write failed. err = %d\n",
17410 __func__, ret);
17411 break;
17414 /* Now read it back. */
17415 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17416 if (ret) {
17417 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17418 "err = %d\n", __func__, ret);
17419 break;
17422 /* Verify it. */
17423 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17424 if (p[i] == i)
17425 continue;
17427 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17428 DMA_RWCTRL_WRITE_BNDRY_16) {
17429 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17430 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17431 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17432 break;
17433 } else {
17434 dev_err(&tp->pdev->dev,
17435 "%s: Buffer corrupted on read back! "
17436 "(%d != %d)\n", __func__, p[i], i);
17437 ret = -ENODEV;
17438 goto out;
17442 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17443 /* Success. */
17444 ret = 0;
17445 break;
17448 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17449 DMA_RWCTRL_WRITE_BNDRY_16) {
17450 /* DMA test passed without adjusting DMA boundary,
17451 * now look for chipsets that are known to expose the
17452 * DMA bug without failing the test.
17453 */
17454 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17455 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17456 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17457 } else {
17458 /* Safe to use the calculated DMA boundary. */
17459 tp->dma_rwctrl = saved_dma_rwctrl;
17462 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17465 out:
17466 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17467 out_nofree:
17468 return ret;
17471 static void tg3_init_bufmgr_config(struct tg3 *tp)
17473 if (tg3_flag(tp, 57765_PLUS)) {
17474 tp->bufmgr_config.mbuf_read_dma_low_water =
17475 DEFAULT_MB_RDMA_LOW_WATER_5705;
17476 tp->bufmgr_config.mbuf_mac_rx_low_water =
17477 DEFAULT_MB_MACRX_LOW_WATER_57765;
17478 tp->bufmgr_config.mbuf_high_water =
17479 DEFAULT_MB_HIGH_WATER_57765;
17481 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17482 DEFAULT_MB_RDMA_LOW_WATER_5705;
17483 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17484 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17485 tp->bufmgr_config.mbuf_high_water_jumbo =
17486 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17487 } else if (tg3_flag(tp, 5705_PLUS)) {
17488 tp->bufmgr_config.mbuf_read_dma_low_water =
17489 DEFAULT_MB_RDMA_LOW_WATER_5705;
17490 tp->bufmgr_config.mbuf_mac_rx_low_water =
17491 DEFAULT_MB_MACRX_LOW_WATER_5705;
17492 tp->bufmgr_config.mbuf_high_water =
17493 DEFAULT_MB_HIGH_WATER_5705;
17494 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17495 tp->bufmgr_config.mbuf_mac_rx_low_water =
17496 DEFAULT_MB_MACRX_LOW_WATER_5906;
17497 tp->bufmgr_config.mbuf_high_water =
17498 DEFAULT_MB_HIGH_WATER_5906;
17501 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17502 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17503 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17504 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17505 tp->bufmgr_config.mbuf_high_water_jumbo =
17506 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17507 } else {
17508 tp->bufmgr_config.mbuf_read_dma_low_water =
17509 DEFAULT_MB_RDMA_LOW_WATER;
17510 tp->bufmgr_config.mbuf_mac_rx_low_water =
17511 DEFAULT_MB_MACRX_LOW_WATER;
17512 tp->bufmgr_config.mbuf_high_water =
17513 DEFAULT_MB_HIGH_WATER;
17515 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17516 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17517 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17518 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17519 tp->bufmgr_config.mbuf_high_water_jumbo =
17520 DEFAULT_MB_HIGH_WATER_JUMBO;
17523 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17524 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17527 static char *tg3_phy_string(struct tg3 *tp)
17529 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17530 case TG3_PHY_ID_BCM5400: return "5400";
17531 case TG3_PHY_ID_BCM5401: return "5401";
17532 case TG3_PHY_ID_BCM5411: return "5411";
17533 case TG3_PHY_ID_BCM5701: return "5701";
17534 case TG3_PHY_ID_BCM5703: return "5703";
17535 case TG3_PHY_ID_BCM5704: return "5704";
17536 case TG3_PHY_ID_BCM5705: return "5705";
17537 case TG3_PHY_ID_BCM5750: return "5750";
17538 case TG3_PHY_ID_BCM5752: return "5752";
17539 case TG3_PHY_ID_BCM5714: return "5714";
17540 case TG3_PHY_ID_BCM5780: return "5780";
17541 case TG3_PHY_ID_BCM5755: return "5755";
17542 case TG3_PHY_ID_BCM5787: return "5787";
17543 case TG3_PHY_ID_BCM5784: return "5784";
17544 case TG3_PHY_ID_BCM5756: return "5722/5756";
17545 case TG3_PHY_ID_BCM5906: return "5906";
17546 case TG3_PHY_ID_BCM5761: return "5761";
17547 case TG3_PHY_ID_BCM5718C: return "5718C";
17548 case TG3_PHY_ID_BCM5718S: return "5718S";
17549 case TG3_PHY_ID_BCM57765: return "57765";
17550 case TG3_PHY_ID_BCM5719C: return "5719C";
17551 case TG3_PHY_ID_BCM5720C: return "5720C";
17552 case TG3_PHY_ID_BCM5762: return "5762C";
17553 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17554 case 0: return "serdes";
17555 default: return "unknown";
17559 static char *tg3_bus_string(struct tg3 *tp, char *str)
17561 if (tg3_flag(tp, PCI_EXPRESS)) {
17562 strcpy(str, "PCI Express");
17563 return str;
17564 } else if (tg3_flag(tp, PCIX_MODE)) {
17565 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17567 strcpy(str, "PCIX:");
17569 if ((clock_ctrl == 7) ||
17570 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17571 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17572 strcat(str, "133MHz");
17573 else if (clock_ctrl == 0)
17574 strcat(str, "33MHz");
17575 else if (clock_ctrl == 2)
17576 strcat(str, "50MHz");
17577 else if (clock_ctrl == 4)
17578 strcat(str, "66MHz");
17579 else if (clock_ctrl == 6)
17580 strcat(str, "100MHz");
17581 } else {
17582 strcpy(str, "PCI:");
17583 if (tg3_flag(tp, PCI_HIGH_SPEED))
17584 strcat(str, "66MHz");
17585 else
17586 strcat(str, "33MHz");
17588 if (tg3_flag(tp, PCI_32BIT))
17589 strcat(str, ":32-bit");
17590 else
17591 strcat(str, ":64-bit");
17592 return str;
17595 static void tg3_init_coal(struct tg3 *tp)
17597 struct ethtool_coalesce *ec = &tp->coal;
17599 memset(ec, 0, sizeof(*ec));
17600 ec->cmd = ETHTOOL_GCOALESCE;
17601 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17602 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17603 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17604 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17605 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17606 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17607 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17608 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17609 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17611 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17612 HOSTCC_MODE_CLRTICK_TXBD)) {
17613 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17614 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17615 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17616 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17619 if (tg3_flag(tp, 5705_PLUS)) {
17620 ec->rx_coalesce_usecs_irq = 0;
17621 ec->tx_coalesce_usecs_irq = 0;
17622 ec->stats_block_coalesce_usecs = 0;
17626 static int tg3_init_one(struct pci_dev *pdev,
17627 const struct pci_device_id *ent)
17629 struct net_device *dev;
17630 struct tg3 *tp;
17631 int i, err;
17632 u32 sndmbx, rcvmbx, intmbx;
17633 char str[40];
17634 u64 dma_mask, persist_dma_mask;
17635 netdev_features_t features = 0;
17637 printk_once(KERN_INFO "%s\n", version);
17639 err = pci_enable_device(pdev);
17640 if (err) {
17641 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17642 return err;
17645 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17646 if (err) {
17647 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17648 goto err_out_disable_pdev;
17651 pci_set_master(pdev);
17653 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17654 if (!dev) {
17655 err = -ENOMEM;
17656 goto err_out_free_res;
17659 SET_NETDEV_DEV(dev, &pdev->dev);
17661 tp = netdev_priv(dev);
17662 tp->pdev = pdev;
17663 tp->dev = dev;
17664 tp->rx_mode = TG3_DEF_RX_MODE;
17665 tp->tx_mode = TG3_DEF_TX_MODE;
17666 tp->irq_sync = 1;
17667 tp->pcierr_recovery = false;
17669 if (tg3_debug > 0)
17670 tp->msg_enable = tg3_debug;
17671 else
17672 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17674 if (pdev_is_ssb_gige_core(pdev)) {
17675 tg3_flag_set(tp, IS_SSB_CORE);
17676 if (ssb_gige_must_flush_posted_writes(pdev))
17677 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17678 if (ssb_gige_one_dma_at_once(pdev))
17679 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17680 if (ssb_gige_have_roboswitch(pdev)) {
17681 tg3_flag_set(tp, USE_PHYLIB);
17682 tg3_flag_set(tp, ROBOSWITCH);
17684 if (ssb_gige_is_rgmii(pdev))
17685 tg3_flag_set(tp, RGMII_MODE);
17688 /* The word/byte swap controls here control register access byte
17689 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17690 * setting below.
17691 */
17692 tp->misc_host_ctrl =
17693 MISC_HOST_CTRL_MASK_PCI_INT |
17694 MISC_HOST_CTRL_WORD_SWAP |
17695 MISC_HOST_CTRL_INDIR_ACCESS |
17696 MISC_HOST_CTRL_PCISTATE_RW;
17698 /* The NONFRM (non-frame) byte/word swap controls take effect
17699 * on descriptor entries, anything which isn't packet data.
17701 * The StrongARM chips on the board (one for tx, one for rx)
17702 * are running in big-endian mode.
17703 */
17704 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17705 GRC_MODE_WSWAP_NONFRM_DATA);
17706 #ifdef __BIG_ENDIAN
17707 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17708 #endif
17709 spin_lock_init(&tp->lock);
17710 spin_lock_init(&tp->indirect_lock);
17711 INIT_WORK(&tp->reset_task, tg3_reset_task);
17713 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17714 if (!tp->regs) {
17715 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17716 err = -ENOMEM;
17717 goto err_out_free_dev;
17720 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17721 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17722 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17723 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17724 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17725 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17726 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17727 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17728 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17729 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17730 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17731 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17732 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17733 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17734 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17735 tg3_flag_set(tp, ENABLE_APE);
17736 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17737 if (!tp->aperegs) {
17738 dev_err(&pdev->dev,
17739 "Cannot map APE registers, aborting\n");
17740 err = -ENOMEM;
17741 goto err_out_iounmap;
17745 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17746 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17748 dev->ethtool_ops = &tg3_ethtool_ops;
17749 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17750 dev->netdev_ops = &tg3_netdev_ops;
17751 dev->irq = pdev->irq;
17753 err = tg3_get_invariants(tp, ent);
17754 if (err) {
17755 dev_err(&pdev->dev,
17756 "Problem fetching invariants of chip, aborting\n");
17757 goto err_out_apeunmap;
17760 /* The EPB bridge inside 5714, 5715, and 5780 and any
17761 * device behind the EPB cannot support DMA addresses > 40-bit.
17762 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17763 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17764 * do DMA address check in tg3_start_xmit().
17765 */
17766 if (tg3_flag(tp, IS_5788))
17767 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17768 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17769 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17770 #ifdef CONFIG_HIGHMEM
17771 dma_mask = DMA_BIT_MASK(64);
17772 #endif
17773 } else
17774 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17776 /* Configure DMA attributes. */
17777 if (dma_mask > DMA_BIT_MASK(32)) {
17778 err = pci_set_dma_mask(pdev, dma_mask);
17779 if (!err) {
17780 features |= NETIF_F_HIGHDMA;
17781 err = pci_set_consistent_dma_mask(pdev,
17782 persist_dma_mask);
17783 if (err < 0) {
17784 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17785 "DMA for consistent allocations\n");
17786 goto err_out_apeunmap;
17790 if (err || dma_mask == DMA_BIT_MASK(32)) {
17791 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17792 if (err) {
17793 dev_err(&pdev->dev,
17794 "No usable DMA configuration, aborting\n");
17795 goto err_out_apeunmap;
17799 tg3_init_bufmgr_config(tp);
17801 /* 5700 B0 chips do not support checksumming correctly due
17802 * to hardware bugs.
17803 */
17804 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17805 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17807 if (tg3_flag(tp, 5755_PLUS))
17808 features |= NETIF_F_IPV6_CSUM;
17811 /* TSO is on by default on chips that support hardware TSO.
17812 * Firmware TSO on older chips gives lower performance, so it
17813 * is off by default, but can be enabled using ethtool.
17814 */
17815 if ((tg3_flag(tp, HW_TSO_1) ||
17816 tg3_flag(tp, HW_TSO_2) ||
17817 tg3_flag(tp, HW_TSO_3)) &&
17818 (features & NETIF_F_IP_CSUM))
17819 features |= NETIF_F_TSO;
17820 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17821 if (features & NETIF_F_IPV6_CSUM)
17822 features |= NETIF_F_TSO6;
17823 if (tg3_flag(tp, HW_TSO_3) ||
17824 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17825 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17826 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17827 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17828 tg3_asic_rev(tp) == ASIC_REV_57780)
17829 features |= NETIF_F_TSO_ECN;
17832 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17833 NETIF_F_HW_VLAN_CTAG_RX;
17834 dev->vlan_features |= features;
17836 /*
17837 * Add loopback capability only for a subset of devices that support
17838 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17839 * loopback for the remaining devices.
17840 */
17841 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17842 !tg3_flag(tp, CPMU_PRESENT))
17843 /* Add the loopback capability */
17844 features |= NETIF_F_LOOPBACK;
17846 dev->hw_features |= features;
17847 dev->priv_flags |= IFF_UNICAST_FLT;
17849 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17850 dev->min_mtu = TG3_MIN_MTU;
17851 dev->max_mtu = TG3_MAX_MTU(tp);
17853 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17854 !tg3_flag(tp, TSO_CAPABLE) &&
17855 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17856 tg3_flag_set(tp, MAX_RXPEND_64);
17857 tp->rx_pending = 63;
17860 err = tg3_get_device_address(tp);
17861 if (err) {
17862 dev_err(&pdev->dev,
17863 "Could not obtain valid ethernet address, aborting\n");
17864 goto err_out_apeunmap;
17867 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17868 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17869 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17870 for (i = 0; i < tp->irq_max; i++) {
17871 struct tg3_napi *tnapi = &tp->napi[i];
17873 tnapi->tp = tp;
17874 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17876 tnapi->int_mbox = intmbx;
17877 if (i <= 4)
17878 intmbx += 0x8;
17879 else
17880 intmbx += 0x4;
17882 tnapi->consmbox = rcvmbx;
17883 tnapi->prodmbox = sndmbx;
17885 if (i)
17886 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17887 else
17888 tnapi->coal_now = HOSTCC_MODE_NOW;
17890 if (!tg3_flag(tp, SUPPORT_MSIX))
17891 break;
17893 /*
17894 * If we support MSIX, we'll be using RSS. If we're using
17895 * RSS, the first vector only handles link interrupts and the
17896 * remaining vectors handle rx and tx interrupts. Reuse the
17897 * mailbox values for the next iteration. The values we setup
17898 * above are still useful for the single vectored mode.
17899 */
17900 if (!i)
17901 continue;
17903 rcvmbx += 0x8;
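/* The per-vector send producer mailboxes are not at a uniform
 * stride; the alternating -0x4/+0xc walk below presumably matches
 * the hardware register layout.
 */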
17905 if (sndmbx & 0x4)
17906 sndmbx -= 0x4;
17907 else
17908 sndmbx += 0xc;
17911 /*
17912 * Reset the chip in case a UNDI or EFI driver did not shut it
17913 * down; otherwise the DMA self test will enable WDMAC and we'll
17914 * see (spurious) pending DMA on the PCI bus at that point.
17915 */
17916 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17917 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17918 tg3_full_lock(tp, 0);
17919 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17920 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17921 tg3_full_unlock(tp);
17924 err = tg3_test_dma(tp);
17925 if (err) {
17926 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17927 goto err_out_apeunmap;
17930 tg3_init_coal(tp);
17932 pci_set_drvdata(pdev, dev);
17934 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17935 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17936 tg3_asic_rev(tp) == ASIC_REV_5762)
17937 tg3_flag_set(tp, PTP_CAPABLE);
17939 tg3_timer_init(tp);
17941 tg3_carrier_off(tp);
17943 err = register_netdev(dev);
17944 if (err) {
17945 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17946 goto err_out_apeunmap;
17949 if (tg3_flag(tp, PTP_CAPABLE)) {
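/* Register a PTP clock; failure is not fatal, the device simply
 * runs without a PHC (tp->ptp_clock stays NULL).
 */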
17950 tg3_ptp_init(tp);
17951 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17952 &tp->pdev->dev);
17953 if (IS_ERR(tp->ptp_clock))
17954 tp->ptp_clock = NULL;
17957 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17958 tp->board_part_number,
17959 tg3_chip_rev_id(tp),
17960 tg3_bus_string(tp, str),
17961 dev->dev_addr);
17963 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17964 char *ethtype;
17966 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17967 ethtype = "10/100Base-TX";
17968 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17969 ethtype = "1000Base-SX";
17970 else
17971 ethtype = "10/100/1000Base-T";
17973 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17974 "(WireSpeed[%d], EEE[%d])\n",
17975 tg3_phy_string(tp), ethtype,
17976 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17977 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17980 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17981 (dev->features & NETIF_F_RXCSUM) != 0,
17982 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17983 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17984 tg3_flag(tp, ENABLE_ASF) != 0,
17985 tg3_flag(tp, TSO_CAPABLE) != 0);
17986 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17987 tp->dma_rwctrl,
17988 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17989 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17991 pci_save_state(pdev);
17993 return 0;
17995 err_out_apeunmap:
17996 if (tp->aperegs) {
17997 iounmap(tp->aperegs);
17998 tp->aperegs = NULL;
18001 err_out_iounmap:
18002 if (tp->regs) {
18003 iounmap(tp->regs);
18004 tp->regs = NULL;
18007 err_out_free_dev:
18008 free_netdev(dev);
18010 err_out_free_res:
18011 pci_release_regions(pdev);
18013 err_out_disable_pdev:
18014 if (pci_is_enabled(pdev))
18015 pci_disable_device(pdev);
18016 return err;
18019 static void tg3_remove_one(struct pci_dev *pdev)
18021 struct net_device *dev = pci_get_drvdata(pdev);
18023 if (dev) {
18024 struct tg3 *tp = netdev_priv(dev);
18026 tg3_ptp_fini(tp);
18028 release_firmware(tp->fw);
18030 tg3_reset_task_cancel(tp);
18032 if (tg3_flag(tp, USE_PHYLIB)) {
18033 tg3_phy_fini(tp);
18034 tg3_mdio_fini(tp);
18037 unregister_netdev(dev);
18038 if (tp->aperegs) {
18039 iounmap(tp->aperegs);
18040 tp->aperegs = NULL;
18042 if (tp->regs) {
18043 iounmap(tp->regs);
18044 tp->regs = NULL;
18046 free_netdev(dev);
18047 pci_release_regions(pdev);
18048 pci_disable_device(pdev);
18052 #ifdef CONFIG_PM_SLEEP
18053 static int tg3_suspend(struct device *device)
18055 struct pci_dev *pdev = to_pci_dev(device);
18056 struct net_device *dev = pci_get_drvdata(pdev);
18057 struct tg3 *tp = netdev_priv(dev);
18058 int err = 0;
18060 rtnl_lock();
18062 if (!netif_running(dev))
18063 goto unlock;
18065 tg3_reset_task_cancel(tp);
18066 tg3_phy_stop(tp);
18067 tg3_netif_stop(tp);
18069 tg3_timer_stop(tp);
18071 tg3_full_lock(tp, 1);
18072 tg3_disable_ints(tp);
18073 tg3_full_unlock(tp);
18075 netif_device_detach(dev);
18077 tg3_full_lock(tp, 0);
18078 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18079 tg3_flag_clear(tp, INIT_COMPLETE);
18080 tg3_full_unlock(tp);
18082 err = tg3_power_down_prepare(tp);
18083 if (err) {
18084 int err2;
18086 tg3_full_lock(tp, 0);
18088 tg3_flag_set(tp, INIT_COMPLETE);
18089 err2 = tg3_restart_hw(tp, true);
18090 if (err2)
18091 goto out;
18093 tg3_timer_start(tp);
18095 netif_device_attach(dev);
18096 tg3_netif_start(tp);
18098 out:
18099 tg3_full_unlock(tp);
18101 if (!err2)
18102 tg3_phy_start(tp);
18105 unlock:
18106 rtnl_unlock();
18107 return err;
18110 static int tg3_resume(struct device *device)
18112 struct pci_dev *pdev = to_pci_dev(device);
18113 struct net_device *dev = pci_get_drvdata(pdev);
18114 struct tg3 *tp = netdev_priv(dev);
18115 int err = 0;
18117 rtnl_lock();
18119 if (!netif_running(dev))
18120 goto unlock;
18122 netif_device_attach(dev);
18124 tg3_full_lock(tp, 0);
18126 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18128 tg3_flag_set(tp, INIT_COMPLETE);
18129 err = tg3_restart_hw(tp,
18130 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18131 if (err)
18132 goto out;
18134 tg3_timer_start(tp);
18136 tg3_netif_start(tp);
18138 out:
18139 tg3_full_unlock(tp);
18141 if (!err)
18142 tg3_phy_start(tp);
18144 unlock:
18145 rtnl_unlock();
18146 return err;
18148 #endif /* CONFIG_PM_SLEEP */
18150 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18152 static void tg3_shutdown(struct pci_dev *pdev)
18154 struct net_device *dev = pci_get_drvdata(pdev);
18155 struct tg3 *tp = netdev_priv(dev);
18157 rtnl_lock();
18158 netif_device_detach(dev);
18160 if (netif_running(dev))
18161 dev_close(dev);
18163 if (system_state == SYSTEM_POWER_OFF)
18164 tg3_power_down(tp);
18166 rtnl_unlock();
18169 /**
18170 * tg3_io_error_detected - called when PCI error is detected
18171 * @pdev: Pointer to PCI device
18172 * @state: The current pci connection state
18173 *
18174 * This function is called after a PCI bus error affecting
18175 * this device has been detected.
18176 */
18177 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18178 pci_channel_state_t state)
18180 struct net_device *netdev = pci_get_drvdata(pdev);
18181 struct tg3 *tp = netdev_priv(netdev);
18182 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18184 netdev_info(netdev, "PCI I/O error detected\n");
18186 rtnl_lock();
18188 /* We probably don't have netdev yet */
18189 if (!netdev || !netif_running(netdev))
18190 goto done;
18192 /* We needn't recover from permanent error */
18193 if (state == pci_channel_io_frozen)
18194 tp->pcierr_recovery = true;
18196 tg3_phy_stop(tp);
18198 tg3_netif_stop(tp);
18200 tg3_timer_stop(tp);
18202 /* Want to make sure that the reset task doesn't run */
18203 tg3_reset_task_cancel(tp);
18205 netif_device_detach(netdev);
18207 /* Clean up software state, even if MMIO is blocked */
18208 tg3_full_lock(tp, 0);
18209 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18210 tg3_full_unlock(tp);
18212 done:
18213 if (state == pci_channel_io_perm_failure) {
18214 if (netdev) {
18215 tg3_napi_enable(tp);
18216 dev_close(netdev);
18218 err = PCI_ERS_RESULT_DISCONNECT;
18219 } else {
18220 pci_disable_device(pdev);
18223 rtnl_unlock();
18225 return err;
18228 /**
18229 * tg3_io_slot_reset - called after the pci bus has been reset.
18230 * @pdev: Pointer to PCI device
18231 *
18232 * Restart the card from scratch, as if from a cold-boot.
18233 * At this point, the card has experienced a hard reset,
18234 * followed by fixups by BIOS, and has its config space
18235 * set up identically to what it was at cold boot.
18236 */
18237 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18239 struct net_device *netdev = pci_get_drvdata(pdev);
18240 struct tg3 *tp = netdev_priv(netdev);
18241 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18242 int err;
18244 rtnl_lock();
18246 if (pci_enable_device(pdev)) {
18247 dev_err(&pdev->dev,
18248 "Cannot re-enable PCI device after reset.\n");
18249 goto done;
18252 pci_set_master(pdev);
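/* Restore config space from the copy saved at probe time, then
 * re-save it so the saved copy stays current for any later
 * recovery pass.
 */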
18253 pci_restore_state(pdev);
18254 pci_save_state(pdev);
18256 if (!netdev || !netif_running(netdev)) {
18257 rc = PCI_ERS_RESULT_RECOVERED;
18258 goto done;
18261 err = tg3_power_up(tp);
18262 if (err)
18263 goto done;
18265 rc = PCI_ERS_RESULT_RECOVERED;
18267 done:
18268 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18269 tg3_napi_enable(tp);
18270 dev_close(netdev);
18272 rtnl_unlock();
18274 return rc;
18277 /**
18278 * tg3_io_resume - called when traffic can start flowing again.
18279 * @pdev: Pointer to PCI device
18280 *
18281 * This callback is called when the error recovery driver tells
18282 * us that it's OK to resume normal operation.
18283 */
18284 static void tg3_io_resume(struct pci_dev *pdev)
18286 struct net_device *netdev = pci_get_drvdata(pdev);
18287 struct tg3 *tp = netdev_priv(netdev);
18288 int err;
18290 rtnl_lock();
18292 if (!netdev || !netif_running(netdev))
18293 goto done;
18295 tg3_full_lock(tp, 0);
18296 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18297 tg3_flag_set(tp, INIT_COMPLETE);
18298 err = tg3_restart_hw(tp, true);
18299 if (err) {
18300 tg3_full_unlock(tp);
18301 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18302 goto done;
18305 netif_device_attach(netdev);
18307 tg3_timer_start(tp);
18309 tg3_netif_start(tp);
18311 tg3_full_unlock(tp);
18313 tg3_phy_start(tp);
18315 done:
18316 tp->pcierr_recovery = false;
18317 rtnl_unlock();
18320 static const struct pci_error_handlers tg3_err_handler = {
18321 .error_detected = tg3_io_error_detected,
18322 .slot_reset = tg3_io_slot_reset,
18323 .resume = tg3_io_resume
18326 static struct pci_driver tg3_driver = {
18327 .name = DRV_MODULE_NAME,
18328 .id_table = tg3_pci_tbl,
18329 .probe = tg3_init_one,
18330 .remove = tg3_remove_one,
18331 .err_handler = &tg3_err_handler,
18332 .driver.pm = &tg3_pm_ops,
18333 .shutdown = tg3_shutdown,
18336 module_pci_driver(tg3_driver);