/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
74 /* Functions & macros to verify TG3_FLAGS types */
76 static inline int _tg3_flag(enum TG3_FLAGS flag
, unsigned long *bits
)
78 return test_bit(flag
, bits
);
81 static inline void _tg3_flag_set(enum TG3_FLAGS flag
, unsigned long *bits
)
86 static inline void _tg3_flag_clear(enum TG3_FLAGS flag
, unsigned long *bits
)
88 clear_bit(flag
, bits
);
91 #define tg3_flag(tp, flag) \
92 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define tg3_flag_set(tp, flag) \
94 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_clear(tp, flag) \
96 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
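/* Usage sketch (illustrative, not part of the original file): the macros
 * paste the flag name onto TG3_FLAG_ and run an atomic bitop on
 * tp->tg3_flags, e.g.:
 *
 *	if (tg3_flag(tp, ENABLE_APE))	// test_bit(TG3_FLAG_ENABLE_APE, ...)
 *		tg3_flag_set(tp, MBOX_WRITE_REORDER);
 */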
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
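/* Worked example (illustrative, not part of the original file): with
 * TG3_TX_RING_SIZE = 512, the mask form wraps the producer index without a
 * hardware modulo:
 *
 *	NEXT_TX(510) == 511
 *	NEXT_TX(511) == (512 & 511) == 0
 */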
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },

	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },

	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
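/* Read-modify-write sketch using the accessors above (illustrative only, not
 * part of the original driver; GRC_MODE and GRC_MODE_HOST_STACKUP are register
 * names from tg3.h used here purely as an example):
 *
 *	u32 val = tr32(GRC_MODE);
 *	tw32_f(GRC_MODE, val | GRC_MODE_HOST_STACKUP);
 */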
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
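/* Usage sketch (illustrative, not part of the original file): lock/unlock
 * calls bracket accesses to resources shared with the APE firmware, e.g.:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	// ... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */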
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if hb interval has exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
*tp
, int reg
, u32
*val
)
1178 return __tg3_readphy(tp
, tp
->phy_addr
, reg
, val
);
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
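/* Illustrative sketch (not part of the original file): reading the standard
 * MII PHY identifier registers through the helper above:
 *
 *	u32 id1, id2;
 *
 *	if (!tg3_readphy(tp, MII_PHYSID1, &id1) &&
 *	    !tg3_readphy(tp, MII_PHYSID2, &id2))
 *		netdev_info(tp->dev, "PHY id %04x:%04x\n", id1, id2);
 */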
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
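/* Worked example (illustrative, not part of the original file): suppose the
 * local end advertises ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM and the
 * remote end advertises only ADVERTISE_1000XPSE_ASYM.  The symmetric branch
 * fails (no shared 1000XPAUSE bit), the asym branch matches, and only the
 * local 1000XPAUSE test succeeds, so the result is FLOW_CTRL_RX: this end
 * honors pause frames from the partner but does not generate its own.
 */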
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}
static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
2180 static int tg3_phy_set_extloopbk(struct tg3
*tp
)
2185 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
2188 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2189 /* Cannot do read-modify-write on 5401 */
2190 err
= tg3_phy_auxctl_write(tp
,
2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2192 MII_TG3_AUXCTL_ACTL_EXTLOOPBK
|
2197 err
= tg3_phy_auxctl_read(tp
,
2198 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2202 val
|= MII_TG3_AUXCTL_ACTL_EXTLOOPBK
;
2203 err
= tg3_phy_auxctl_write(tp
,
2204 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, val
);
2210 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
2214 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2217 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2218 phytest
| MII_TG3_FET_SHADOW_EN
);
2219 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
2221 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2223 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2224 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
2226 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
2230 static void tg3_phy_toggle_apd(struct tg3
*tp
, bool enable
)
2234 if (!tg3_flag(tp
, 5705_PLUS
) ||
2235 (tg3_flag(tp
, 5717_PLUS
) &&
2236 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)))
2239 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2240 tg3_phy_fet_toggle_apd(tp
, enable
);
2244 reg
= MII_TG3_MISC_SHDW_SCR5_LPED
|
2245 MII_TG3_MISC_SHDW_SCR5_DLPTLM
|
2246 MII_TG3_MISC_SHDW_SCR5_SDTL
|
2247 MII_TG3_MISC_SHDW_SCR5_C125OE
;
2248 if (tg3_asic_rev(tp
) != ASIC_REV_5784
|| !enable
)
2249 reg
|= MII_TG3_MISC_SHDW_SCR5_DLLAPD
;
2251 tg3_phy_shdw_write(tp
, MII_TG3_MISC_SHDW_SCR5_SEL
, reg
);
2254 reg
= MII_TG3_MISC_SHDW_APD_WKTM_84MS
;
2256 reg
|= MII_TG3_MISC_SHDW_APD_ENABLE
;
2258 tg3_phy_shdw_write(tp
, MII_TG3_MISC_SHDW_APD_SEL
, reg
);
2261 static void tg3_phy_toggle_automdix(struct tg3
*tp
, bool enable
)
2265 if (!tg3_flag(tp
, 5705_PLUS
) ||
2266 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
2269 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2272 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &ephy
)) {
2273 u32 reg
= MII_TG3_FET_SHDW_MISCCTRL
;
2275 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2276 ephy
| MII_TG3_FET_SHADOW_EN
);
2277 if (!tg3_readphy(tp
, reg
, &phy
)) {
2279 phy
|= MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
2281 phy
&= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
2282 tg3_writephy(tp
, reg
, phy
);
2284 tg3_writephy(tp
, MII_TG3_FET_TEST
, ephy
);
2289 ret
= tg3_phy_auxctl_read(tp
,
2290 MII_TG3_AUXCTL_SHDWSEL_MISC
, &phy
);
2293 phy
|= MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
2295 phy
&= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
2296 tg3_phy_auxctl_write(tp
,
2297 MII_TG3_AUXCTL_SHDWSEL_MISC
, phy
);
2302 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
2307 if (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
)
2310 ret
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
, &val
);
2312 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
,
2313 val
| MII_TG3_AUXCTL_MISC_WIRESPD_EN
);
2316 static void tg3_phy_apply_otp(struct tg3
*tp
)
2325 if (tg3_phy_toggle_auxctl_smdsp(tp
, true))
2328 phy
= ((otp
& TG3_OTP_AGCTGT_MASK
) >> TG3_OTP_AGCTGT_SHIFT
);
2329 phy
|= MII_TG3_DSP_TAP1_AGCTGT_DFLT
;
2330 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP1
, phy
);
2332 phy
= ((otp
& TG3_OTP_HPFFLTR_MASK
) >> TG3_OTP_HPFFLTR_SHIFT
) |
2333 ((otp
& TG3_OTP_HPFOVER_MASK
) >> TG3_OTP_HPFOVER_SHIFT
);
2334 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH0
, phy
);
2336 phy
= ((otp
& TG3_OTP_LPFDIS_MASK
) >> TG3_OTP_LPFDIS_SHIFT
);
2337 phy
|= MII_TG3_DSP_AADJ1CH3_ADCCKADJ
;
2338 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH3
, phy
);
2340 phy
= ((otp
& TG3_OTP_VDAC_MASK
) >> TG3_OTP_VDAC_SHIFT
);
2341 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP75
, phy
);
2343 phy
= ((otp
& TG3_OTP_10BTAMP_MASK
) >> TG3_OTP_10BTAMP_SHIFT
);
2344 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP96
, phy
);
2346 phy
= ((otp
& TG3_OTP_ROFF_MASK
) >> TG3_OTP_ROFF_SHIFT
) |
2347 ((otp
& TG3_OTP_RCOFF_MASK
) >> TG3_OTP_RCOFF_SHIFT
);
2348 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP97
, phy
);
2350 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2353 static void tg3_eee_pull_config(struct tg3
*tp
, struct ethtool_eee
*eee
)
2356 struct ethtool_eee
*dest
= &tp
->eee
;
2358 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
2364 if (tg3_phy_cl45_read(tp
, MDIO_MMD_AN
, TG3_CL45_D7_EEERES_STAT
, &val
))
2367 /* Pull eee_active */
2368 if (val
== TG3_CL45_D7_EEERES_STAT_LP_1000T
||
2369 val
== TG3_CL45_D7_EEERES_STAT_LP_100TX
) {
2370 dest
->eee_active
= 1;
2372 dest
->eee_active
= 0;
2374 /* Pull lp advertised settings */
2375 if (tg3_phy_cl45_read(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_LPABLE
, &val
))
2377 dest
->lp_advertised
= mmd_eee_adv_to_ethtool_adv_t(val
);
2379 /* Pull advertised and eee_enabled settings */
2380 if (tg3_phy_cl45_read(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, &val
))
2382 dest
->eee_enabled
= !!val
;
2383 dest
->advertised
= mmd_eee_adv_to_ethtool_adv_t(val
);
2385 /* Pull tx_lpi_enabled */
2386 val
= tr32(TG3_CPMU_EEE_MODE
);
2387 dest
->tx_lpi_enabled
= !!(val
& TG3_CPMU_EEEMD_LPI_IN_TX
);
2389 /* Pull lpi timer value */
2390 dest
->tx_lpi_timer
= tr32(TG3_CPMU_EEE_DBTMR1
) & 0xffff;
2393 static void tg3_phy_eee_adjust(struct tg3
*tp
, bool current_link_up
)
2397 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
2402 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
2404 tp
->link_config
.active_duplex
== DUPLEX_FULL
&&
2405 (tp
->link_config
.active_speed
== SPEED_100
||
2406 tp
->link_config
.active_speed
== SPEED_1000
)) {
2409 if (tp
->link_config
.active_speed
== SPEED_1000
)
2410 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_16_5_US
;
2412 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_36_US
;
2414 tw32(TG3_CPMU_EEE_CTRL
, eeectl
);
2416 tg3_eee_pull_config(tp
, NULL
);
2417 if (tp
->eee
.eee_active
)
2421 if (!tp
->setlpicnt
) {
2422 if (current_link_up
&&
2423 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2424 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, 0x0000);
2425 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2428 val
= tr32(TG3_CPMU_EEE_MODE
);
2429 tw32(TG3_CPMU_EEE_MODE
, val
& ~TG3_CPMU_EEEMD_LPI_ENABLE
);
2433 static void tg3_phy_eee_enable(struct tg3
*tp
)
2437 if (tp
->link_config
.active_speed
== SPEED_1000
&&
2438 (tg3_asic_rev(tp
) == ASIC_REV_5717
||
2439 tg3_asic_rev(tp
) == ASIC_REV_5719
||
2440 tg3_flag(tp
, 57765_CLASS
)) &&
2441 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2442 val
= MII_TG3_DSP_TAP26_ALNOKO
|
2443 MII_TG3_DSP_TAP26_RMRXSTO
;
2444 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
2445 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2448 val
= tr32(TG3_CPMU_EEE_MODE
);
2449 tw32(TG3_CPMU_EEE_MODE
, val
| TG3_CPMU_EEEMD_LPI_ENABLE
);
2452 static int tg3_wait_macro_done(struct tg3
*tp
)
2459 if (!tg3_readphy(tp
, MII_TG3_DSP_CONTROL
, &tmp32
)) {
2460 if ((tmp32
& 0x1000) == 0)
2470 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
2472 static const u32 test_pat
[4][6] = {
2473 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2474 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2475 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2476 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2480 for (chan
= 0; chan
< 4; chan
++) {
2483 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2484 (chan
* 0x2000) | 0x0200);
2485 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
2487 for (i
= 0; i
< 6; i
++)
2488 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
2491 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
2492 if (tg3_wait_macro_done(tp
)) {
2497 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2498 (chan
* 0x2000) | 0x0200);
2499 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0082);
2500 if (tg3_wait_macro_done(tp
)) {
2505 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0802);
2506 if (tg3_wait_macro_done(tp
)) {
2511 for (i
= 0; i
< 6; i
+= 2) {
2514 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
2515 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
2516 tg3_wait_macro_done(tp
)) {
2522 if (low
!= test_pat
[chan
][i
] ||
2523 high
!= test_pat
[chan
][i
+1]) {
2524 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
2525 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
2526 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
2536 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
2540 for (chan
= 0; chan
< 4; chan
++) {
2543 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2544 (chan
* 0x2000) | 0x0200);
2545 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
2546 for (i
= 0; i
< 6; i
++)
2547 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
2548 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
2549 if (tg3_wait_macro_done(tp
))
2556 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
2558 u32 reg32
, phy9_orig
;
2559 int retries
, do_phy_reset
, err
;
2565 err
= tg3_bmcr_reset(tp
);
2571 /* Disable transmitter and interrupt. */
2572 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
2576 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
2578 /* Set full-duplex, 1000 mbps. */
2579 tg3_writephy(tp
, MII_BMCR
,
2580 BMCR_FULLDPLX
| BMCR_SPEED1000
);
2582 /* Set to master mode. */
2583 if (tg3_readphy(tp
, MII_CTRL1000
, &phy9_orig
))
2586 tg3_writephy(tp
, MII_CTRL1000
,
2587 CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
);
2589 err
= tg3_phy_toggle_auxctl_smdsp(tp
, true);
2593 /* Block the PHY control access. */
2594 tg3_phydsp_write(tp
, 0x8005, 0x0800);
2596 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
2599 } while (--retries
);
2601 err
= tg3_phy_reset_chanpat(tp
);
2605 tg3_phydsp_write(tp
, 0x8005, 0x0000);
2607 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
2608 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0000);
2610 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2612 tg3_writephy(tp
, MII_CTRL1000
, phy9_orig
);
2614 err
= tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
);
2619 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
2624 static void tg3_carrier_off(struct tg3
*tp
)
2626 netif_carrier_off(tp
->dev
);
2627 tp
->link_up
= false;
2630 static void tg3_warn_mgmt_link_flap(struct tg3
*tp
)
2632 if (tg3_flag(tp
, ENABLE_ASF
))
2633 netdev_warn(tp
->dev
,
2634 "Management side-band traffic will be interrupted during phy settings change\n");
2637 /* This will reset the tigon3 PHY if there is no valid
2638 * link unless the FORCE argument is non-zero.
2640 static int tg3_phy_reset(struct tg3
*tp
)
2645 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
2646 val
= tr32(GRC_MISC_CFG
);
2647 tw32_f(GRC_MISC_CFG
, val
& ~GRC_MISC_CFG_EPHY_IDDQ
);
2650 err
= tg3_readphy(tp
, MII_BMSR
, &val
);
2651 err
|= tg3_readphy(tp
, MII_BMSR
, &val
);
2655 if (netif_running(tp
->dev
) && tp
->link_up
) {
2656 netif_carrier_off(tp
->dev
);
2657 tg3_link_report(tp
);
2660 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
2661 tg3_asic_rev(tp
) == ASIC_REV_5704
||
2662 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
2663 err
= tg3_phy_reset_5703_4_5(tp
);
2670 if (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
2671 tg3_chip_rev(tp
) != CHIPREV_5784_AX
) {
2672 cpmuctrl
= tr32(TG3_CPMU_CTRL
);
2673 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
)
2675 cpmuctrl
& ~CPMU_CTRL_GPHY_10MB_RXONLY
);
2678 err
= tg3_bmcr_reset(tp
);
2682 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
) {
2683 val
= MII_TG3_DSP_EXP8_AEDW
| MII_TG3_DSP_EXP8_REJ2MHz
;
2684 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP8
, val
);
2686 tw32(TG3_CPMU_CTRL
, cpmuctrl
);
2689 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
||
2690 tg3_chip_rev(tp
) == CHIPREV_5761_AX
) {
2691 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2692 if ((val
& CPMU_LSPD_1000MB_MACCLK_MASK
) ==
2693 CPMU_LSPD_1000MB_MACCLK_12_5
) {
2694 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2696 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
2700 if (tg3_flag(tp
, 5717_PLUS
) &&
2701 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
))
2704 tg3_phy_apply_otp(tp
);
2706 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
2707 tg3_phy_toggle_apd(tp
, true);
2709 tg3_phy_toggle_apd(tp
, false);
2712 if ((tp
->phy_flags
& TG3_PHYFLG_ADC_BUG
) &&
2713 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2714 tg3_phydsp_write(tp
, 0x201f, 0x2aaa);
2715 tg3_phydsp_write(tp
, 0x000a, 0x0323);
2716 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2719 if (tp
->phy_flags
& TG3_PHYFLG_5704_A0_BUG
) {
2720 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2721 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2724 if (tp
->phy_flags
& TG3_PHYFLG_BER_BUG
) {
2725 if (!tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2726 tg3_phydsp_write(tp
, 0x000a, 0x310b);
2727 tg3_phydsp_write(tp
, 0x201f, 0x9506);
2728 tg3_phydsp_write(tp
, 0x401f, 0x14e2);
2729 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2731 } else if (tp
->phy_flags
& TG3_PHYFLG_JITTER_BUG
) {
2732 if (!tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2733 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
2734 if (tp
->phy_flags
& TG3_PHYFLG_ADJUST_TRIM
) {
2735 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x110b);
2736 tg3_writephy(tp
, MII_TG3_TEST1
,
2737 MII_TG3_TEST1_TRIM_EN
| 0x4);
2739 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x010b);
2741 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2745 /* Set Extended packet length bit (bit 14) on all chips that */
2746 /* support jumbo frames */
2747 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2748 /* Cannot do read-modify-write on 5401 */
2749 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
2750 } else if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
2751 /* Set bit 14 with read-modify-write to preserve other bits */
2752 err
= tg3_phy_auxctl_read(tp
,
2753 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2755 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2756 val
| MII_TG3_AUXCTL_ACTL_EXTPKTLEN
);
2759 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2760 * jumbo frames transmission.
2762 if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
2763 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &val
))
2764 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2765 val
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
2768 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
2769 /* adjust output voltage */
2770 tg3_writephy(tp
, MII_TG3_FET_PTEST
, 0x12);
2773 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5762_A0
)
2774 tg3_phydsp_write(tp
, 0xffb, 0x4000);
2776 tg3_phy_toggle_automdix(tp
, true);
2777 tg3_phy_set_wirespeed(tp
);
2781 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2782 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2783 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2784 					  TG3_GPIO_MSG_NEED_VAUX)
2785 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2786 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2787 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2788 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2789 	 (TG3_GPIO_MSG_DRVR_PRES << 12))

2791 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2792 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2793 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2794 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2795 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
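/* Editor's illustration (not part of the driver): the GPIO message word
 * packs one TG3_GPIO_MSG_* nibble per PCI function, so for a hypothetical
 * function 2 that needs auxiliary power:
 *
 *	u32 msg = TG3_GPIO_MSG_NEED_VAUX << (4 * 2);	// 0x00000200
 *
 * TG3_GPIO_MSG_ALL_NEED_VAUX_MASK (0x00002222) then tests that request
 * across all four functions at once.
 */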
2797 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2798 {
2799 	u32 status, shift;

2801 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2802 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2803 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2804 	else
2805 		status = tr32(TG3_CPMU_DRV_STATUS);

2807 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2808 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2809 	status |= (newstat << shift);

2811 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2812 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2813 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2814 	else
2815 		tw32(TG3_CPMU_DRV_STATUS, status);

2817 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2818 }
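/* Minimal usage sketch (editor's note, assuming a multi-function NIC):
 * the call replaces only this function's nibble in the shared status word
 * and returns the whole word with the register shift removed, so the
 * TG3_GPIO_MSG_ALL_* masks can be applied directly to the result, as
 * tg3_frob_aux_power_5717() below does:
 *
 *	u32 status = tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
 *	if (status & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
 *		;	// some function still wants auxiliary power
 */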
2820 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2821 {
2822 	if (!tg3_flag(tp, IS_NIC))
2823 		return 0;

2825 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2826 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2827 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2828 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2829 			return -EIO;

2831 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

2833 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2834 			    TG3_GRC_LCLCTL_PWRSW_DELAY);

2836 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2837 	} else {
2838 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2839 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2840 	}

2842 	return 0;
2843 }

2845 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2846 {
2847 	u32 grc_local_ctrl;

2849 	if (!tg3_flag(tp, IS_NIC) ||
2850 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2851 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2852 		return;

2854 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

2856 	tw32_wait_f(GRC_LOCAL_CTRL,
2857 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2858 		    TG3_GRC_LCLCTL_PWRSW_DELAY);

2860 	tw32_wait_f(GRC_LOCAL_CTRL,
2861 		    grc_local_ctrl,
2862 		    TG3_GRC_LCLCTL_PWRSW_DELAY);

2864 	tw32_wait_f(GRC_LOCAL_CTRL,
2865 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2866 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2867 }

2869 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2870 {
2871 	if (!tg3_flag(tp, IS_NIC))
2872 		return;

2874 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2875 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2876 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2877 			    (GRC_LCLCTRL_GPIO_OE0 |
2878 			     GRC_LCLCTRL_GPIO_OE1 |
2879 			     GRC_LCLCTRL_GPIO_OE2 |
2880 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2881 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2882 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2883 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2884 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2885 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2886 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2887 				     GRC_LCLCTRL_GPIO_OE1 |
2888 				     GRC_LCLCTRL_GPIO_OE2 |
2889 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2890 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2891 				     tp->grc_local_ctrl;
2892 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2893 			    TG3_GRC_LCLCTL_PWRSW_DELAY);

2895 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2896 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2897 			    TG3_GRC_LCLCTL_PWRSW_DELAY);

2899 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2900 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2901 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2902 	} else {
2903 		u32 no_gpio2;
2904 		u32 grc_local_ctrl = 0;

2906 		/* Workaround to prevent overdrawing Amps. */
2907 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2908 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2909 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2910 				    grc_local_ctrl,
2911 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2912 		}

2914 		/* On 5753 and variants, GPIO2 cannot be used. */
2915 		no_gpio2 = tp->nic_sram_data_cfg &
2916 			   NIC_SRAM_DATA_CFG_NO_GPIO2;

2918 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2919 				  GRC_LCLCTRL_GPIO_OE1 |
2920 				  GRC_LCLCTRL_GPIO_OE2 |
2921 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2922 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2923 		if (no_gpio2) {
2924 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2925 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2926 		}
2927 		tw32_wait_f(GRC_LOCAL_CTRL,
2928 			    tp->grc_local_ctrl | grc_local_ctrl,
2929 			    TG3_GRC_LCLCTL_PWRSW_DELAY);

2931 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

2933 		tw32_wait_f(GRC_LOCAL_CTRL,
2934 			    tp->grc_local_ctrl | grc_local_ctrl,
2935 			    TG3_GRC_LCLCTL_PWRSW_DELAY);

2937 		if (!no_gpio2) {
2938 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2939 			tw32_wait_f(GRC_LOCAL_CTRL,
2940 				    tp->grc_local_ctrl | grc_local_ctrl,
2941 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2942 		}
2943 	}
2944 }

2946 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2947 {
2948 	u32 msg = 0;

2950 	/* Serialize power state transitions */
2951 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2952 		return;

2954 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2955 		msg = TG3_GPIO_MSG_NEED_VAUX;

2957 	msg = tg3_set_function_status(tp, msg);

2959 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2960 		goto done;

2962 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2963 		tg3_pwrsrc_switch_to_vaux(tp);
2964 	else
2965 		tg3_pwrsrc_die_with_vmain(tp);

2967 done:
2968 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2969 }

2971 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2972 {
2973 	bool need_vaux = false;

2975 	/* The GPIOs do something completely different on 57765. */
2976 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2977 		return;

2979 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2980 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2981 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2982 		tg3_frob_aux_power_5717(tp, include_wol ?
2983 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2984 		return;
2985 	}

2987 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2988 		struct net_device *dev_peer;

2990 		dev_peer = pci_get_drvdata(tp->pdev_peer);

2992 		/* remove_one() may have been run on the peer. */
2993 		if (dev_peer) {
2994 			struct tg3 *tp_peer = netdev_priv(dev_peer);

2996 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2997 				return;

2999 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3000 			    tg3_flag(tp_peer, ENABLE_ASF))
3001 				need_vaux = true;
3002 		}
3003 	}

3005 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3006 	    tg3_flag(tp, ENABLE_ASF))
3007 		need_vaux = true;

3009 	if (need_vaux)
3010 		tg3_pwrsrc_switch_to_vaux(tp);
3011 	else
3012 		tg3_pwrsrc_die_with_vmain(tp);
3013 }

3015 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3016 {
3017 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3018 		return 1;
3019 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3020 		if (speed != SPEED_10)
3021 			return 1;
3022 	} else if (speed == SPEED_10)
3023 		return 1;

3025 	return 0;
3026 }

3028 static bool tg3_phy_power_bug(struct tg3 *tp)
3029 {
3030 	switch (tg3_asic_rev(tp)) {
3031 	case ASIC_REV_5700:
3032 	case ASIC_REV_5704:
3033 		return true;
3034 	case ASIC_REV_5780:
3035 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3036 			return true;
3037 		return false;
3038 	case ASIC_REV_5717:
3039 		if (!tp->pci_fn)
3040 			return true;
3041 		return false;
3042 	case ASIC_REV_5719:
3043 	case ASIC_REV_5720:
3044 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3045 		    !tp->pci_fn)
3046 			return true;
3047 		return false;
3048 	}

3050 	return false;
3051 }

3053 static bool tg3_phy_led_bug(struct tg3 *tp)
3054 {
3055 	switch (tg3_asic_rev(tp)) {
3056 	case ASIC_REV_5719:
3057 	case ASIC_REV_5720:
3058 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3059 		    !tp->pci_fn)
3060 			return true;
3061 		return false;
3062 	}

3064 	return false;
3065 }

3067 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3068 {
3069 	u32 val;

3071 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3072 		return;

3074 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3075 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3076 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3077 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

3079 			sg_dig_ctrl |=
3080 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3081 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3082 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3083 		}
3084 		return;
3085 	}

3087 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3088 		tg3_bmcr_reset(tp);
3089 		val = tr32(GRC_MISC_CFG);
3090 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3091 		udelay(40);
3092 		return;
3093 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3094 		u32 phytest;
3095 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3096 			u32 phy;

3098 			tg3_writephy(tp, MII_ADVERTISE, 0);
3099 			tg3_writephy(tp, MII_BMCR,
3100 				     BMCR_ANENABLE | BMCR_ANRESTART);

3102 			tg3_writephy(tp, MII_TG3_FET_TEST,
3103 				     phytest | MII_TG3_FET_SHADOW_EN);
3104 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3105 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3106 				tg3_writephy(tp,
3107 					     MII_TG3_FET_SHDW_AUXMODE4,
3108 					     phy);
3109 			}
3110 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3111 		}
3112 		return;
3113 	} else if (do_low_power) {
3114 		if (!tg3_phy_led_bug(tp))
3115 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3116 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

3118 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3119 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3120 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3121 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3122 	}

3124 	/* The PHY should not be powered down on some chips because
3125 	 * of bugs.
3126 	 */
3127 	if (tg3_phy_power_bug(tp))
3128 		return;

3130 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3131 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3132 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3133 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3134 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3135 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3136 	}

3138 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3139 }

3141 /* tp->lock is held. */
3142 static int tg3_nvram_lock(struct tg3 *tp)
3143 {
3144 	if (tg3_flag(tp, NVRAM)) {
3145 		int i;

3147 		if (tp->nvram_lock_cnt == 0) {
3148 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3149 			for (i = 0; i < 8000; i++) {
3150 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3151 					break;
3152 				udelay(20);
3153 			}
3154 			if (i == 8000) {
3155 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3156 				return -ENODEV;
3157 			}
3158 		}
3159 		tp->nvram_lock_cnt++;
3160 	}
3161 	return 0;
3162 }

3164 /* tp->lock is held. */
3165 static void tg3_nvram_unlock(struct tg3 *tp)
3166 {
3167 	if (tg3_flag(tp, NVRAM)) {
3168 		if (tp->nvram_lock_cnt > 0)
3169 			tp->nvram_lock_cnt--;
3170 		if (tp->nvram_lock_cnt == 0)
3171 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3172 	}
3173 }

3175 /* tp->lock is held. */
3176 static void tg3_enable_nvram_access(struct tg3 *tp)
3177 {
3178 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3179 		u32 nvaccess = tr32(NVRAM_ACCESS);

3181 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3182 	}
3183 }

3185 /* tp->lock is held. */
3186 static void tg3_disable_nvram_access(struct tg3 *tp)
3187 {
3188 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3189 		u32 nvaccess = tr32(NVRAM_ACCESS);

3191 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3192 	}
3193 }

3195 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3196 				       u32 offset, u32 *val)
3197 {
3198 	u32 tmp;
3199 	int i;

3201 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3202 		return -EINVAL;

3204 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3205 					EEPROM_ADDR_DEVID_MASK |
3206 					EEPROM_ADDR_READ);
3207 	tw32(GRC_EEPROM_ADDR,
3208 	     tmp |
3209 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3210 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3211 	      EEPROM_ADDR_ADDR_MASK) |
3212 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

3214 	for (i = 0; i < 1000; i++) {
3215 		tmp = tr32(GRC_EEPROM_ADDR);

3217 		if (tmp & EEPROM_ADDR_COMPLETE)
3218 			break;
3219 		msleep(1);
3220 	}
3221 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3222 		return -EBUSY;

3224 	tmp = tr32(GRC_EEPROM_DATA);

3226 	/*
3227 	 * The data will always be opposite the native endian
3228 	 * format.  Perform a blind byteswap to compensate.
3229 	 */
3230 	*val = swab32(tmp);

3232 	return 0;
3233 }

3235 #define NVRAM_CMD_TIMEOUT 10000

3237 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3238 {
3239 	int i;

3241 	tw32(NVRAM_CMD, nvram_cmd);
3242 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3243 		usleep_range(10, 40);
3244 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3245 			udelay(10);
3246 			break;
3247 		}
3248 	}

3250 	if (i == NVRAM_CMD_TIMEOUT)
3251 		return -EBUSY;

3253 	return 0;
3254 }

3256 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3257 {
3258 	if (tg3_flag(tp, NVRAM) &&
3259 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3260 	    tg3_flag(tp, FLASH) &&
3261 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3262 	    (tp->nvram_jedecnum == JEDEC_ATMEL))

3264 		addr = ((addr / tp->nvram_pagesize) <<
3265 			ATMEL_AT45DB0X1B_PAGE_POS) +
3266 		       (addr % tp->nvram_pagesize);

3268 	return addr;
3269 }

3271 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3272 {
3273 	if (tg3_flag(tp, NVRAM) &&
3274 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3275 	    tg3_flag(tp, FLASH) &&
3276 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3277 	    (tp->nvram_jedecnum == JEDEC_ATMEL))

3279 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3280 			tp->nvram_pagesize) +
3281 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

3283 	return addr;
3284 }
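/* Worked example (editor's note): the Atmel AT45DB0X1B parts are addressed
 * as page << ATMEL_AT45DB0X1B_PAGE_POS plus an offset within the page,
 * rather than as a flat byte address. Assuming the 264-byte page size and
 * a PAGE_POS of 9 used for these parts:
 *
 *	logical addr 300  ->  page 1 (300 / 264), offset 36 (300 % 264)
 *	physical addr     =  (1 << 9) + 36 = 548
 *
 * tg3_nvram_logical_addr() performs the exact inverse mapping.
 */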
3286 /* NOTE: Data read in from NVRAM is byteswapped according to
3287  * the byteswapping settings for all other register accesses.
3288  * tg3 devices are BE devices, so on a BE machine, the data
3289  * returned will be exactly as it is seen in NVRAM.  On a LE
3290  * machine, the 32-bit value will be byteswapped.
3291  */
3292 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3293 {
3294 	int ret;

3296 	if (!tg3_flag(tp, NVRAM))
3297 		return tg3_nvram_read_using_eeprom(tp, offset, val);

3299 	offset = tg3_nvram_phys_addr(tp, offset);

3301 	if (offset > NVRAM_ADDR_MSK)
3302 		return -EINVAL;

3304 	ret = tg3_nvram_lock(tp);
3305 	if (ret)
3306 		return ret;

3308 	tg3_enable_nvram_access(tp);

3310 	tw32(NVRAM_ADDR, offset);
3311 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3312 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

3314 	if (ret == 0)
3315 		*val = tr32(NVRAM_RDDATA);

3317 	tg3_disable_nvram_access(tp);

3319 	tg3_nvram_unlock(tp);

3321 	return ret;
3322 }

3324 /* Ensures NVRAM data is in bytestream format. */
3325 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3326 {
3327 	u32 v;
3328 	int res = tg3_nvram_read(tp, offset, &v);

3330 	if (res == 0)
3331 		*val = cpu_to_be32(v);

3333 	return res;
3334 }
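/* Sketch (editor's illustration, hypothetical caller): because
 * tg3_nvram_read() hands back a value in native register order, byte-exact
 * data such as ID strings should be fetched through tg3_nvram_read_be32()
 * so the bytes land in memory in NVRAM order on both BE and LE hosts:
 *
 *	__be32 word;
 *	if (!tg3_nvram_read_be32(tp, offset, &word))
 *		memcpy(&buf[i], &word, sizeof(word));	// bytestream-safe
 */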
3334 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3335 					      u32 offset, u32 len, u8 *buf)
3336 {
3337 	int i, j, rc = 0;
3338 	u32 val;

3340 	for (i = 0; i < len; i += 4) {
3341 		u32 addr;
3342 		__be32 data;

3344 		addr = offset + i;

3346 		memcpy(&data, buf + i, 4);

3348 		/*
3349 		 * The SEEPROM interface expects the data to always be opposite
3350 		 * the native endian format.  We accomplish this by reversing
3351 		 * all the operations that would have been performed on the
3352 		 * data from a call to tg3_nvram_read_be32().
3353 		 */
3354 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

3356 		val = tr32(GRC_EEPROM_ADDR);
3357 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

3359 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3360 			EEPROM_ADDR_READ);
3361 		tw32(GRC_EEPROM_ADDR, val |
3362 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3363 			(addr & EEPROM_ADDR_ADDR_MASK) |
3364 			EEPROM_ADDR_START |
3365 			EEPROM_ADDR_WRITE);

3367 		for (j = 0; j < 1000; j++) {
3368 			val = tr32(GRC_EEPROM_ADDR);

3370 			if (val & EEPROM_ADDR_COMPLETE)
3371 				break;
3372 			msleep(1);
3373 		}
3374 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3375 			rc = -EBUSY;
3376 			break;
3377 		}
3378 	}

3380 	return rc;
3381 }

3383 /* offset and length are dword aligned */
3384 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3385 		u8 *buf)
3386 {
3387 	int ret = 0;
3388 	u32 pagesize = tp->nvram_pagesize;
3389 	u32 pagemask = pagesize - 1;
3390 	u32 nvram_cmd;
3391 	u8 *tmp;

3393 	tmp = kmalloc(pagesize, GFP_KERNEL);
3394 	if (tmp == NULL)
3395 		return -ENOMEM;

3397 	while (len) {
3398 		int j;
3399 		u32 phy_addr, page_off, size;

3401 		phy_addr = offset & ~pagemask;

3403 		for (j = 0; j < pagesize; j += 4) {
3404 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3405 						  (__be32 *) (tmp + j));
3406 			if (ret)
3407 				break;
3408 		}
3409 		if (ret)
3410 			break;

3412 		page_off = offset & pagemask;
3413 		size = pagesize;
3414 		if (len < size)
3415 			size = len;

3417 		len -= size;

3419 		memcpy(tmp + page_off, buf, size);

3421 		offset = offset + (pagesize - page_off);

3423 		tg3_enable_nvram_access(tp);

3425 		/*
3426 		 * Before we can erase the flash page, we need
3427 		 * to issue a special "write enable" command.
3428 		 */
3429 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

3431 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3432 			break;

3434 		/* Erase the target page */
3435 		tw32(NVRAM_ADDR, phy_addr);

3437 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3438 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

3440 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3441 			break;

3443 		/* Issue another write enable to start the write. */
3444 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

3446 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3447 			break;

3449 		for (j = 0; j < pagesize; j += 4) {
3450 			__be32 data;

3452 			data = *((__be32 *) (tmp + j));

3454 			tw32(NVRAM_WRDATA, be32_to_cpu(data));

3456 			tw32(NVRAM_ADDR, phy_addr + j);

3458 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3459 				NVRAM_CMD_WR;

3461 			if (j == 0)
3462 				nvram_cmd |= NVRAM_CMD_FIRST;
3463 			else if (j == (pagesize - 4))
3464 				nvram_cmd |= NVRAM_CMD_LAST;

3466 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3467 			if (ret)
3468 				break;
3469 		}
3470 		if (ret)
3471 			break;
3472 	}

3474 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3475 	tg3_nvram_exec_cmd(tp, nvram_cmd);

3477 	kfree(tmp);

3479 	return ret;
3480 }

3482 /* offset and length are dword aligned */
3483 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3484 		u8 *buf)
3485 {
3486 	int i, ret = 0;

3488 	for (i = 0; i < len; i += 4, offset += 4) {
3489 		u32 page_off, phy_addr, nvram_cmd;
3490 		__be32 data;

3492 		memcpy(&data, buf + i, 4);
3493 		tw32(NVRAM_WRDATA, be32_to_cpu(data));

3495 		page_off = offset % tp->nvram_pagesize;

3497 		phy_addr = tg3_nvram_phys_addr(tp, offset);

3499 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

3501 		if (page_off == 0 || i == 0)
3502 			nvram_cmd |= NVRAM_CMD_FIRST;
3503 		if (page_off == (tp->nvram_pagesize - 4))
3504 			nvram_cmd |= NVRAM_CMD_LAST;

3506 		if (i == (len - 4))
3507 			nvram_cmd |= NVRAM_CMD_LAST;

3509 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3510 		    !tg3_flag(tp, FLASH) ||
3511 		    !tg3_flag(tp, 57765_PLUS))
3512 			tw32(NVRAM_ADDR, phy_addr);

3514 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3515 		    !tg3_flag(tp, 5755_PLUS) &&
3516 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3517 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3518 			u32 cmd;

3520 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3521 			ret = tg3_nvram_exec_cmd(tp, cmd);
3522 			if (ret)
3523 				break;
3524 		}
3525 		if (!tg3_flag(tp, FLASH)) {
3526 			/* We always do complete word writes to eeprom. */
3527 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3528 		}

3530 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3531 		if (ret)
3532 			break;
3533 	}
3534 	return ret;
3535 }
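/* Example (editor's note): with a hypothetical 256-byte page, a write of
 * len 8 starting at offset 252 issues two commands:
 *
 *	i = 0 (offset 252, page_off 252): FIRST (i == 0) and LAST
 *	                                  (page_off == pagesize - 4)
 *	i = 4 (offset 256, page_off 0):   FIRST (new page) and LAST
 *	                                  (final dword, i == len - 4)
 *
 * so page boundaries restart the FIRST/LAST bracketing that the NVRAM
 * state machine expects around each programmed page.
 */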
3537 /* offset and length are dword aligned */
3538 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3539 {
3540 	int ret;

3542 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3543 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3544 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3545 		udelay(40);
3546 	}

3548 	if (!tg3_flag(tp, NVRAM)) {
3549 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3550 	} else {
3551 		u32 grc_mode;

3553 		ret = tg3_nvram_lock(tp);
3554 		if (ret)
3555 			return ret;

3557 		tg3_enable_nvram_access(tp);
3558 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3559 			tw32(NVRAM_WRITE1, 0x406);

3561 		grc_mode = tr32(GRC_MODE);
3562 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

3564 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3565 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3566 				buf);
3567 		} else {
3568 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3569 				buf);
3570 		}

3572 		grc_mode = tr32(GRC_MODE);
3573 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

3575 		tg3_disable_nvram_access(tp);
3576 		tg3_nvram_unlock(tp);
3577 	}

3579 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3580 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3581 		udelay(40);
3582 	}

3584 	return ret;
3585 }

3587 #define RX_CPU_SCRATCH_BASE	0x30000
3588 #define RX_CPU_SCRATCH_SIZE	0x04000
3589 #define TX_CPU_SCRATCH_BASE	0x34000
3590 #define TX_CPU_SCRATCH_SIZE	0x04000

3592 /* tp->lock is held. */
3593 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3594 {
3595 	int i;
3596 	const int iters = 10000;

3598 	for (i = 0; i < iters; i++) {
3599 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3600 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3601 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3602 			break;
3603 		if (pci_channel_offline(tp->pdev))
3604 			return -EBUSY;
3605 	}

3607 	return (i == iters) ? -EBUSY : 0;
3608 }

3610 /* tp->lock is held. */
3611 static int tg3_rxcpu_pause(struct tg3 *tp)
3612 {
3613 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

3615 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3616 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3617 	udelay(10);

3619 	return rc;
3620 }

3622 /* tp->lock is held. */
3623 static int tg3_txcpu_pause(struct tg3 *tp)
3624 {
3625 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3626 }

3628 /* tp->lock is held. */
3629 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3630 {
3631 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3632 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3633 }

3635 /* tp->lock is held. */
3636 static void tg3_rxcpu_resume(struct tg3 *tp)
3637 {
3638 	tg3_resume_cpu(tp, RX_CPU_BASE);
3639 }

3641 /* tp->lock is held. */
3642 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3643 {
3644 	int rc;

3646 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

3648 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3649 		u32 val = tr32(GRC_VCPU_EXT_CTRL);

3651 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3652 		return 0;
3653 	}
3654 	if (cpu_base == RX_CPU_BASE) {
3655 		rc = tg3_rxcpu_pause(tp);
3656 	} else {
3657 		/*
3658 		 * There is only an Rx CPU for the 5750 derivative in the
3659 		 * BCM4785.
3660 		 */
3661 		if (tg3_flag(tp, IS_SSB_CORE))
3662 			return 0;

3664 		rc = tg3_txcpu_pause(tp);
3665 	}

3667 	if (rc) {
3668 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3669 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3670 		return -ENODEV;
3671 	}

3673 	/* Clear firmware's nvram arbitration. */
3674 	if (tg3_flag(tp, NVRAM))
3675 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3676 	return 0;
3677 }

3679 static int tg3_fw_data_len(struct tg3 *tp,
3680 			   const struct tg3_firmware_hdr *fw_hdr)
3681 {
3682 	int fw_len;

3684 	/* Non fragmented firmware have one firmware header followed by a
3685 	 * contiguous chunk of data to be written. The length field in that
3686 	 * header is not the length of data to be written but the complete
3687 	 * length of the bss. The data length is determined based on
3688 	 * tp->fw->size minus headers.
3689 	 *
3690 	 * Fragmented firmware have a main header followed by multiple
3691 	 * fragments. Each fragment is identical to non fragmented firmware
3692 	 * with a firmware header followed by a contiguous chunk of data. In
3693 	 * the main header, the length field is unused and set to 0xffffffff.
3694 	 * In each fragment header the length is the entire size of that
3695 	 * fragment i.e. fragment data + header length. Data length is
3696 	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3697 	 */
3698 	if (tp->fw_len == 0xffffffff)
3699 		fw_len = be32_to_cpu(fw_hdr->len);
3700 	else
3701 		fw_len = tp->fw->size;

3703 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3704 }
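/* Layout sketch (editor's illustration of the comment above; field names
 * follow struct tg3_firmware_hdr):
 *
 *	non-fragmented:  [hdr: version | base_addr | len = bss size][data...]
 *	fragmented:      [main hdr: len = 0xffffffff]
 *	                 [frag hdr: len = TG3_FW_HDR_LEN + data size][data...]
 *	                 [frag hdr: ...][data...]
 *
 * Hence the two branches above: tp->fw_len == 0xffffffff selects the
 * per-fragment length, otherwise the whole blob size is used.
 */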
3706 /* tp->lock is held. */
3707 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3708 				 u32 cpu_scratch_base, int cpu_scratch_size,
3709 				 const struct tg3_firmware_hdr *fw_hdr)
3710 {
3711 	int err, i;
3712 	void (*write_op)(struct tg3 *, u32, u32);
3713 	int total_len = tp->fw->size;

3715 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3716 		netdev_err(tp->dev,
3717 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3718 			   __func__);
3719 		return -EINVAL;
3720 	}

3722 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3723 		write_op = tg3_write_mem;
3724 	else
3725 		write_op = tg3_write_indirect_reg32;

3727 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3728 		/* It is possible that bootcode is still loading at this point.
3729 		 * Get the nvram lock first before halting the cpu.
3730 		 */
3731 		int lock_err = tg3_nvram_lock(tp);
3732 		err = tg3_halt_cpu(tp, cpu_base);
3733 		if (!lock_err)
3734 			tg3_nvram_unlock(tp);
3735 		if (err)
3736 			goto out;

3738 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3739 			write_op(tp, cpu_scratch_base + i, 0);
3740 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3741 		tw32(cpu_base + CPU_MODE,
3742 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3743 	} else {
3744 		/* Subtract additional main header for fragmented firmware and
3745 		 * advance to the first fragment
3746 		 */
3747 		total_len -= TG3_FW_HDR_LEN;
3748 		fw_hdr++;
3749 	}

3751 	do {
3752 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3753 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3754 			write_op(tp, cpu_scratch_base +
3755 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3756 				     (i * sizeof(u32)),
3757 				 be32_to_cpu(fw_data[i]));

3759 		total_len -= be32_to_cpu(fw_hdr->len);

3761 		/* Advance to next fragment */
3762 		fw_hdr = (struct tg3_firmware_hdr *)
3763 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3764 	} while (total_len > 0);

3766 	err = 0;

3768 out:
3769 	return err;
3770 }

3772 /* tp->lock is held. */
3773 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3774 {
3775 	int i;
3776 	const int iters = 5;

3778 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3779 	tw32_f(cpu_base + CPU_PC, pc);

3781 	for (i = 0; i < iters; i++) {
3782 		if (tr32(cpu_base + CPU_PC) == pc)
3783 			break;
3784 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3785 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3786 		tw32_f(cpu_base + CPU_PC, pc);
3787 		udelay(1000);
3788 	}

3790 	return (i == iters) ? -EBUSY : 0;
3791 }

3793 /* tp->lock is held. */
3794 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3795 {
3796 	const struct tg3_firmware_hdr *fw_hdr;
3797 	int err;

3799 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

3801 	/* Firmware blob starts with version numbers, followed by
3802 	   start address and length. We are setting complete length.
3803 	   length = end_address_of_bss - start_address_of_text.
3804 	   Remainder is the blob to be loaded contiguously
3805 	   from start address. */

3807 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3808 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3809 				    fw_hdr);
3810 	if (err)
3811 		return err;

3813 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3814 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3815 				    fw_hdr);
3816 	if (err)
3817 		return err;

3819 	/* Now startup only the RX cpu. */
3820 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3821 				       be32_to_cpu(fw_hdr->base_addr));
3822 	if (err) {
3823 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3824 			   "should be %08x\n", __func__,
3825 			   tr32(RX_CPU_BASE + CPU_PC),
3826 				be32_to_cpu(fw_hdr->base_addr));
3827 		return -ENODEV;
3828 	}

3830 	tg3_rxcpu_resume(tp);

3832 	return 0;
3833 }

3835 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3836 {
3837 	const int iters = 1000;
3838 	int i;
3839 	u32 val;

3841 	/* Wait for boot code to complete initialization and enter service
3842 	 * loop. It is then safe to download service patches
3843 	 */
3844 	for (i = 0; i < iters; i++) {
3845 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3846 			break;

3848 		udelay(10);
3849 	}

3851 	if (i == iters) {
3852 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3853 		return -EBUSY;
3854 	}

3856 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3857 	if (val & 0xff) {
3858 		netdev_warn(tp->dev,
3859 			    "Other patches exist. Not downloading EEE patch\n");
3860 		return -EEXIST;
3861 	}

3863 	return 0;
3864 }

3866 /* tp->lock is held. */
3867 static void tg3_load_57766_firmware(struct tg3 *tp)
3868 {
3869 	struct tg3_firmware_hdr *fw_hdr;

3871 	if (!tg3_flag(tp, NO_NVRAM))
3872 		return;

3874 	if (tg3_validate_rxcpu_state(tp))
3875 		return;

3877 	if (!tp->fw)
3878 		return;

3880 	/* This firmware blob has a different format than older firmware
3881 	 * releases as given below. The main difference is we have fragmented
3882 	 * data to be written to non-contiguous locations.
3883 	 *
3884 	 * In the beginning we have a firmware header identical to other
3885 	 * firmware which consists of version, base addr and length. The length
3886 	 * here is unused and set to 0xffffffff.
3887 	 *
3888 	 * This is followed by a series of firmware fragments which are
3889 	 * individually identical to previous firmware. i.e. they have the
3890 	 * firmware header and followed by data for that fragment. The version
3891 	 * field of the individual fragment header is unused.
3892 	 */

3894 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3895 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3896 		return;

3898 	if (tg3_rxcpu_pause(tp))
3899 		return;

3901 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3902 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

3904 	tg3_rxcpu_resume(tp);
3905 }

3907 /* tp->lock is held. */
3908 static int tg3_load_tso_firmware(struct tg3 *tp)
3909 {
3910 	const struct tg3_firmware_hdr *fw_hdr;
3911 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3912 	int err;

3914 	if (!tg3_flag(tp, FW_TSO))
3915 		return 0;

3917 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

3919 	/* Firmware blob starts with version numbers, followed by
3920 	   start address and length. We are setting complete length.
3921 	   length = end_address_of_bss - start_address_of_text.
3922 	   Remainder is the blob to be loaded contiguously
3923 	   from start address. */

3925 	cpu_scratch_size = tp->fw_len;

3927 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3928 		cpu_base = RX_CPU_BASE;
3929 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3930 	} else {
3931 		cpu_base = TX_CPU_BASE;
3932 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3933 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3934 	}

3936 	err = tg3_load_firmware_cpu(tp, cpu_base,
3937 				    cpu_scratch_base, cpu_scratch_size,
3938 				    fw_hdr);
3939 	if (err)
3940 		return err;

3942 	/* Now startup the cpu. */
3943 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3944 				       be32_to_cpu(fw_hdr->base_addr));
3945 	if (err) {
3946 		netdev_err(tp->dev,
3947 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3948 			   __func__, tr32(cpu_base + CPU_PC),
3949 			   be32_to_cpu(fw_hdr->base_addr));
3950 		return -ENODEV;
3951 	}

3953 	tg3_resume_cpu(tp, cpu_base);
3954 	return 0;
3955 }

3957 /* tp->lock is held. */
3958 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3959 {
3960 	u32 addr_high, addr_low;

3962 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3963 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3964 		    (mac_addr[4] <<  8) | mac_addr[5]);

3966 	if (index < 4) {
3967 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3968 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3969 	} else {
3970 		index -= 4;
3971 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3972 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3973 	}
3974 }
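/* Example (editor's note): for a hypothetical MAC 00:10:18:aa:bb:cc the
 * register pair for slot 0 is programmed as
 *
 *	addr_high = 0x00000010	// bytes 0-1
 *	addr_low  = 0x18aabbcc	// bytes 2-5
 *
 * i.e. the hardware keeps the address big-endian across the HIGH/LOW pair.
 */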
3976 /* tp->lock is held. */
3977 static void __tg3_set_mac_addr(struct tg3
*tp
, bool skip_mac_1
)
3982 for (i
= 0; i
< 4; i
++) {
3983 if (i
== 1 && skip_mac_1
)
3985 __tg3_set_one_mac_addr(tp
, tp
->dev
->dev_addr
, i
);
3988 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
3989 tg3_asic_rev(tp
) == ASIC_REV_5704
) {
3990 for (i
= 4; i
< 16; i
++)
3991 __tg3_set_one_mac_addr(tp
, tp
->dev
->dev_addr
, i
);
3994 addr_high
= (tp
->dev
->dev_addr
[0] +
3995 tp
->dev
->dev_addr
[1] +
3996 tp
->dev
->dev_addr
[2] +
3997 tp
->dev
->dev_addr
[3] +
3998 tp
->dev
->dev_addr
[4] +
3999 tp
->dev
->dev_addr
[5]) &
4000 TX_BACKOFF_SEED_MASK
;
4001 tw32(MAC_TX_BACKOFF_SEED
, addr_high
);
4004 static void tg3_enable_register_access(struct tg3
*tp
)
4007 * Make sure register accesses (indirect or otherwise) will function
4010 pci_write_config_dword(tp
->pdev
,
4011 TG3PCI_MISC_HOST_CTRL
, tp
->misc_host_ctrl
);
4014 static int tg3_power_up(struct tg3
*tp
)
4018 tg3_enable_register_access(tp
);
4020 err
= pci_set_power_state(tp
->pdev
, PCI_D0
);
4022 /* Switch out of Vaux if it is a NIC */
4023 tg3_pwrsrc_switch_to_vmain(tp
);
4025 netdev_err(tp
->dev
, "Transition to D0 failed\n");
4031 static int tg3_setup_phy(struct tg3
*, bool);
4033 static int tg3_power_down_prepare(struct tg3
*tp
)
4036 bool device_should_wake
, do_low_power
;
4038 tg3_enable_register_access(tp
);
4040 /* Restore the CLKREQ setting. */
4041 if (tg3_flag(tp
, CLKREQ_BUG
))
4042 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4043 PCI_EXP_LNKCTL_CLKREQ_EN
);
4045 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
4046 tw32(TG3PCI_MISC_HOST_CTRL
,
4047 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
4049 device_should_wake
= device_may_wakeup(&tp
->pdev
->dev
) &&
4050 tg3_flag(tp
, WOL_ENABLE
);
4052 if (tg3_flag(tp
, USE_PHYLIB
)) {
4053 do_low_power
= false;
4054 if ((tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) &&
4055 !(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4056 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising
) = { 0, };
4057 struct phy_device
*phydev
;
4060 phydev
= mdiobus_get_phy(tp
->mdio_bus
, tp
->phy_addr
);
4062 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
4064 tp
->link_config
.speed
= phydev
->speed
;
4065 tp
->link_config
.duplex
= phydev
->duplex
;
4066 tp
->link_config
.autoneg
= phydev
->autoneg
;
4067 ethtool_convert_link_mode_to_legacy_u32(
4068 &tp
->link_config
.advertising
,
4069 phydev
->advertising
);
4071 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT
, advertising
);
4072 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT
,
4074 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT
,
4076 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT
,
4079 if (tg3_flag(tp
, ENABLE_ASF
) || device_should_wake
) {
4080 if (tg3_flag(tp
, WOL_SPEED_100MB
)) {
4081 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT
,
4083 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT
,
4085 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT
,
4088 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT
,
4093 linkmode_copy(phydev
->advertising
, advertising
);
4094 phy_start_aneg(phydev
);
4096 phyid
= phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
;
4097 if (phyid
!= PHY_ID_BCMAC131
) {
4098 phyid
&= PHY_BCM_OUI_MASK
;
4099 if (phyid
== PHY_BCM_OUI_1
||
4100 phyid
== PHY_BCM_OUI_2
||
4101 phyid
== PHY_BCM_OUI_3
)
4102 do_low_power
= true;
4106 do_low_power
= true;
4108 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
))
4109 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
4111 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
4112 tg3_setup_phy(tp
, false);
4115 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
4118 val
= tr32(GRC_VCPU_EXT_CTRL
);
4119 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
4120 } else if (!tg3_flag(tp
, ENABLE_ASF
)) {
4124 for (i
= 0; i
< 200; i
++) {
4125 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
4126 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
4131 if (tg3_flag(tp
, WOL_CAP
))
4132 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
4133 WOL_DRV_STATE_SHUTDOWN
|
4137 if (device_should_wake
) {
4140 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
4142 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
4143 tg3_phy_auxctl_write(tp
,
4144 MII_TG3_AUXCTL_SHDWSEL_PWRCTL
,
4145 MII_TG3_AUXCTL_PCTL_WOL_EN
|
4146 MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
4147 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC
);
4151 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
4152 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
4153 else if (tp
->phy_flags
&
4154 TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) {
4155 if (tp
->link_config
.active_speed
== SPEED_1000
)
4156 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
4158 mac_mode
= MAC_MODE_PORT_MODE_MII
;
4160 mac_mode
= MAC_MODE_PORT_MODE_MII
;
4162 mac_mode
|= tp
->mac_mode
& MAC_MODE_LINK_POLARITY
;
4163 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
4164 u32 speed
= tg3_flag(tp
, WOL_SPEED_100MB
) ?
4165 SPEED_100
: SPEED_10
;
4166 if (tg3_5700_link_polarity(tp
, speed
))
4167 mac_mode
|= MAC_MODE_LINK_POLARITY
;
4169 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
4172 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
4175 if (!tg3_flag(tp
, 5750_PLUS
))
4176 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
4178 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
4179 if ((tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
)) &&
4180 (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)))
4181 mac_mode
|= MAC_MODE_KEEP_FRAME_IN_WOL
;
4183 if (tg3_flag(tp
, ENABLE_APE
))
4184 mac_mode
|= MAC_MODE_APE_TX_EN
|
4185 MAC_MODE_APE_RX_EN
|
4186 MAC_MODE_TDE_ENABLE
;
4188 tw32_f(MAC_MODE
, mac_mode
);
4191 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
4195 if (!tg3_flag(tp
, WOL_SPEED_100MB
) &&
4196 (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4197 tg3_asic_rev(tp
) == ASIC_REV_5701
)) {
4200 base_val
= tp
->pci_clock_ctrl
;
4201 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
4202 CLOCK_CTRL_TXCLK_DISABLE
);
4204 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
4205 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
4206 } else if (tg3_flag(tp
, 5780_CLASS
) ||
4207 tg3_flag(tp
, CPMU_PRESENT
) ||
4208 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
4210 } else if (!(tg3_flag(tp
, 5750_PLUS
) && tg3_flag(tp
, ENABLE_ASF
))) {
4211 u32 newbits1
, newbits2
;
4213 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4214 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4215 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
4216 CLOCK_CTRL_TXCLK_DISABLE
|
4218 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
4219 } else if (tg3_flag(tp
, 5705_PLUS
)) {
4220 newbits1
= CLOCK_CTRL_625_CORE
;
4221 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
4223 newbits1
= CLOCK_CTRL_ALTCLK
;
4224 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
4227 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
4230 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
4233 if (!tg3_flag(tp
, 5705_PLUS
)) {
4236 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4237 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4238 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
4239 CLOCK_CTRL_TXCLK_DISABLE
|
4240 CLOCK_CTRL_44MHZ_CORE
);
4242 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
4245 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
4246 tp
->pci_clock_ctrl
| newbits3
, 40);
4250 if (!(device_should_wake
) && !tg3_flag(tp
, ENABLE_ASF
))
4251 tg3_power_down_phy(tp
, do_low_power
);
4253 tg3_frob_aux_power(tp
, true);
4255 /* Workaround for unstable PLL clock */
4256 if ((!tg3_flag(tp
, IS_SSB_CORE
)) &&
4257 ((tg3_chip_rev(tp
) == CHIPREV_5750_AX
) ||
4258 (tg3_chip_rev(tp
) == CHIPREV_5750_BX
))) {
4259 u32 val
= tr32(0x7d00);
4261 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4263 if (!tg3_flag(tp
, ENABLE_ASF
)) {
4266 err
= tg3_nvram_lock(tp
);
4267 tg3_halt_cpu(tp
, RX_CPU_BASE
);
4269 tg3_nvram_unlock(tp
);
4273 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
4275 tg3_ape_driver_state_change(tp
, RESET_KIND_SHUTDOWN
);
4280 static void tg3_power_down(struct tg3
*tp
)
4282 pci_wake_from_d3(tp
->pdev
, tg3_flag(tp
, WOL_ENABLE
));
4283 pci_set_power_state(tp
->pdev
, PCI_D3hot
);
4286 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
4288 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
4289 case MII_TG3_AUX_STAT_10HALF
:
4291 *duplex
= DUPLEX_HALF
;
4294 case MII_TG3_AUX_STAT_10FULL
:
4296 *duplex
= DUPLEX_FULL
;
4299 case MII_TG3_AUX_STAT_100HALF
:
4301 *duplex
= DUPLEX_HALF
;
4304 case MII_TG3_AUX_STAT_100FULL
:
4306 *duplex
= DUPLEX_FULL
;
4309 case MII_TG3_AUX_STAT_1000HALF
:
4310 *speed
= SPEED_1000
;
4311 *duplex
= DUPLEX_HALF
;
4314 case MII_TG3_AUX_STAT_1000FULL
:
4315 *speed
= SPEED_1000
;
4316 *duplex
= DUPLEX_FULL
;
4320 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4321 *speed
= (val
& MII_TG3_AUX_STAT_100
) ? SPEED_100
:
4323 *duplex
= (val
& MII_TG3_AUX_STAT_FULL
) ? DUPLEX_FULL
:
4327 *speed
= SPEED_UNKNOWN
;
4328 *duplex
= DUPLEX_UNKNOWN
;
4333 static int tg3_phy_autoneg_cfg(struct tg3
*tp
, u32 advertise
, u32 flowctrl
)
4338 new_adv
= ADVERTISE_CSMA
;
4339 new_adv
|= ethtool_adv_to_mii_adv_t(advertise
) & ADVERTISE_ALL
;
4340 new_adv
|= mii_advertise_flowctrl(flowctrl
);
4342 err
= tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
4346 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4347 new_adv
= ethtool_adv_to_mii_ctrl1000_t(advertise
);
4349 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4350 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
)
4351 new_adv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4353 err
= tg3_writephy(tp
, MII_CTRL1000
, new_adv
);
4358 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
4361 tw32(TG3_CPMU_EEE_MODE
,
4362 tr32(TG3_CPMU_EEE_MODE
) & ~TG3_CPMU_EEEMD_LPI_ENABLE
);
4364 err
= tg3_phy_toggle_auxctl_smdsp(tp
, true);
4369 /* Advertise 100-BaseTX EEE ability */
4370 if (advertise
& ADVERTISED_100baseT_Full
)
4371 val
|= MDIO_AN_EEE_ADV_100TX
;
4372 /* Advertise 1000-BaseT EEE ability */
4373 if (advertise
& ADVERTISED_1000baseT_Full
)
4374 val
|= MDIO_AN_EEE_ADV_1000T
;
4376 if (!tp
->eee
.eee_enabled
) {
4378 tp
->eee
.advertised
= 0;
4380 tp
->eee
.advertised
= advertise
&
4381 (ADVERTISED_100baseT_Full
|
4382 ADVERTISED_1000baseT_Full
);
4385 err
= tg3_phy_cl45_write(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, val
);
4389 switch (tg3_asic_rev(tp
)) {
4391 case ASIC_REV_57765
:
4392 case ASIC_REV_57766
:
4394 /* If we advertised any eee advertisements above... */
4396 val
= MII_TG3_DSP_TAP26_ALNOKO
|
4397 MII_TG3_DSP_TAP26_RMRXSTO
|
4398 MII_TG3_DSP_TAP26_OPCSINPT
;
4399 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
4403 if (!tg3_phydsp_read(tp
, MII_TG3_DSP_CH34TP2
, &val
))
4404 tg3_phydsp_write(tp
, MII_TG3_DSP_CH34TP2
, val
|
4405 MII_TG3_DSP_CH34TP2_HIBW01
);
4408 err2
= tg3_phy_toggle_auxctl_smdsp(tp
, false);
4417 static void tg3_phy_copper_begin(struct tg3
*tp
)
4419 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
||
4420 (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4423 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
4424 !(tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
)) {
4425 adv
= ADVERTISED_10baseT_Half
|
4426 ADVERTISED_10baseT_Full
;
4427 if (tg3_flag(tp
, WOL_SPEED_100MB
))
4428 adv
|= ADVERTISED_100baseT_Half
|
4429 ADVERTISED_100baseT_Full
;
4430 if (tp
->phy_flags
& TG3_PHYFLG_1G_ON_VAUX_OK
) {
4431 if (!(tp
->phy_flags
&
4432 TG3_PHYFLG_DISABLE_1G_HD_ADV
))
4433 adv
|= ADVERTISED_1000baseT_Half
;
4434 adv
|= ADVERTISED_1000baseT_Full
;
4437 fc
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
4439 adv
= tp
->link_config
.advertising
;
4440 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
4441 adv
&= ~(ADVERTISED_1000baseT_Half
|
4442 ADVERTISED_1000baseT_Full
);
4444 fc
= tp
->link_config
.flowctrl
;
4447 tg3_phy_autoneg_cfg(tp
, adv
, fc
);
4449 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
4450 (tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
)) {
4451 /* Normally during power down we want to autonegotiate
4452 * the lowest possible speed for WOL. However, to avoid
4453 * link flap, we leave it untouched.
4458 tg3_writephy(tp
, MII_BMCR
,
4459 BMCR_ANENABLE
| BMCR_ANRESTART
);
4462 u32 bmcr
, orig_bmcr
;
4464 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
4465 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
4467 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
4468 /* With autoneg disabled, 5715 only links up when the
4469 * advertisement register has the configured speed
4472 tg3_writephy(tp
, MII_ADVERTISE
, ADVERTISE_ALL
);
4476 switch (tp
->link_config
.speed
) {
4482 bmcr
|= BMCR_SPEED100
;
4486 bmcr
|= BMCR_SPEED1000
;
4490 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
4491 bmcr
|= BMCR_FULLDPLX
;
4493 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
4494 (bmcr
!= orig_bmcr
)) {
4495 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
4496 for (i
= 0; i
< 1500; i
++) {
4500 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
4501 tg3_readphy(tp
, MII_BMSR
, &tmp
))
4503 if (!(tmp
& BMSR_LSTATUS
)) {
4508 tg3_writephy(tp
, MII_BMCR
, bmcr
);
4514 static int tg3_phy_pull_config(struct tg3
*tp
)
4519 err
= tg3_readphy(tp
, MII_BMCR
, &val
);
4523 if (!(val
& BMCR_ANENABLE
)) {
4524 tp
->link_config
.autoneg
= AUTONEG_DISABLE
;
4525 tp
->link_config
.advertising
= 0;
4526 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
4530 switch (val
& (BMCR_SPEED1000
| BMCR_SPEED100
)) {
4532 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
4535 tp
->link_config
.speed
= SPEED_10
;
4538 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
4541 tp
->link_config
.speed
= SPEED_100
;
4543 case BMCR_SPEED1000
:
4544 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4545 tp
->link_config
.speed
= SPEED_1000
;
4553 if (val
& BMCR_FULLDPLX
)
4554 tp
->link_config
.duplex
= DUPLEX_FULL
;
4556 tp
->link_config
.duplex
= DUPLEX_HALF
;
4558 tp
->link_config
.flowctrl
= FLOW_CTRL_RX
| FLOW_CTRL_TX
;
4564 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
4565 tp
->link_config
.advertising
= ADVERTISED_Autoneg
;
4566 tg3_flag_set(tp
, PAUSE_AUTONEG
);
4568 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
4571 err
= tg3_readphy(tp
, MII_ADVERTISE
, &val
);
4575 adv
= mii_adv_to_ethtool_adv_t(val
& ADVERTISE_ALL
);
4576 tp
->link_config
.advertising
|= adv
| ADVERTISED_TP
;
4578 tp
->link_config
.flowctrl
= tg3_decode_flowctrl_1000T(val
);
4580 tp
->link_config
.advertising
|= ADVERTISED_FIBRE
;
4583 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4586 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
4587 err
= tg3_readphy(tp
, MII_CTRL1000
, &val
);
4591 adv
= mii_ctrl1000_to_ethtool_adv_t(val
);
4593 err
= tg3_readphy(tp
, MII_ADVERTISE
, &val
);
4597 adv
= tg3_decode_flowctrl_1000X(val
);
4598 tp
->link_config
.flowctrl
= adv
;
4600 val
&= (ADVERTISE_1000XHALF
| ADVERTISE_1000XFULL
);
4601 adv
= mii_adv_to_ethtool_adv_x(val
);
4604 tp
->link_config
.advertising
|= adv
;
4611 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
4615 /* Turn off tap power management. */
4616 /* Set Extended packet length bit */
4617 err
= tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
4619 err
|= tg3_phydsp_write(tp
, 0x0012, 0x1804);
4620 err
|= tg3_phydsp_write(tp
, 0x0013, 0x1204);
4621 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0132);
4622 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0232);
4623 err
|= tg3_phydsp_write(tp
, 0x201f, 0x0a20);
4630 static bool tg3_phy_eee_config_ok(struct tg3
*tp
)
4632 struct ethtool_eee eee
;
4634 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
4637 tg3_eee_pull_config(tp
, &eee
);
4639 if (tp
->eee
.eee_enabled
) {
4640 if (tp
->eee
.advertised
!= eee
.advertised
||
4641 tp
->eee
.tx_lpi_timer
!= eee
.tx_lpi_timer
||
4642 tp
->eee
.tx_lpi_enabled
!= eee
.tx_lpi_enabled
)
4645 /* EEE is disabled but we're advertising */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			netif_carrier_on(tp->dev);
		} else {
			netif_carrier_off(tp->dev);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u32 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset. If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
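/* Note added for clarity (not in the original source): the ANEG_CFG_* bits
 * above appear to be the IEEE 802.3 clause 37 1000BASE-X base-page bits with
 * the two bytes of the 16-bit config word swapped as the MAC register sees
 * them, e.g. the clause 37 FD bit (D5) lands at 0x2000 and the PS2 bit (D8)
 * at 0x0001. ANEG_CFG_INVAL then masks the positions that clause 37 leaves
 * reserved.
 */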
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		fallthrough;
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		fallthrough;
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		fallthrough;
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
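/* Reader's note (added; not in the original source): on the happy path the
 * state machine above advances AN_ENABLE -> RESTART_INIT -> RESTART ->
 * ABILITY_DETECT_INIT -> ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 * COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT -> IDLE_DETECT ->
 * LINK_OK. It returns ANEG_TIMER_ENAB whenever it needs to be polled again
 * and ANEG_DONE once the link is resolved; any ability/config mismatch drops
 * it back to AN_ENABLE to restart the exchange.
 */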
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u16 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	    !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	    (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
{
	u64 stamp;

	ptp_read_system_prets(sts);
	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	ptp_read_system_postts(sts);
	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;

	return stamp;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
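/* Illustrative note (added, not from the original source): the nanosecond
 * counter is split across two 32-bit registers, so setting the clock to
 * 0x1_00000000 ns writes 0x00000000 to TG3_EAV_REF_CLCK_LSB and 0x00000001
 * to TG3_EAV_REF_CLCK_MSB; the STOP/RESUME control bits bracket the two
 * writes so the hardware never runs on a half-updated value.
 */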
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);

static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					 SOF_TIMESTAMPING_RX_HARDWARE |
					 SOF_TIMESTAMPING_RAW_HARDWARE;
	}

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}
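/* Usage note (added): this is the driver's ethtool .get_ts_info hook, so the
 * capabilities filled in above are what userspace sees from
 * "ethtool -T <iface>"; phc_index ties the interface to its /dev/ptpN clock
 * when the device is PTP-capable.
 */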
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
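/* Worked example (added for illustration): for ppb = 1000 (the clock is to
 * be trimmed by ~1 us per second), correction = 1000 * 16777216 / 1000000000
 * ~= 16. Adding 16 to the 24-bit accumulator on every clock overflows it
 * about once every 2^24 / 16 = 1048576 clocks, i.e. one extra (or one
 * skipped) tick per ~2^20 clocks, which is the requested ~1 ppm rate trim.
 */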
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			    struct ptp_system_timestamp *sts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp, sts);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	*ts = ns_to_timespec64(ns);

	return 0;
}
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec64_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettimex64	= tg3_ptp_gettimex,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
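/* Note (added): elsewhere in the driver this capability table is copied into
 * tp->ptp_info (see tg3_ptp_init() below) and handed to ptp_clock_register()
 * to create the /dev/ptpN device; the callbacks above then serialize against
 * the rest of the driver via tg3_full_lock()/tg3_full_unlock().
 */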
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
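/* Illustrative note (added): because dst is first advanced by off bytes, each
 * dump lands at the same byte offset in the buffer as the registers occupy in
 * the device's register space; e.g. tg3_rd32_loop(tp, regs, MAC_MODE, 0x10)
 * copies four consecutive 32-bit registers starting at MAC_MODE into
 * regs[MAC_MODE / 4] .. regs[MAC_MODE / 4 + 3].
 */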
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
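/* Worked example (added): TG3_TX_RING_SIZE is a power of two, so the masked
 * difference computes ring occupancy correctly even across index wrap-around.
 * With a 512-entry ring, tx_prod = 5 and tx_cons = 510 gives
 * (5 - 510) & 511 = 7 descriptors in flight, hence tx_pending - 7 slots
 * still available to the transmit path.
 */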
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_consume_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();
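	/* Note (added): the barrier described above pairs with a matching
	 * smp_mb() on the producer side (in the original driver,
	 * tg3_start_xmit()'s queue-stop path issues one after stopping the
	 * queue), so one side always observes either the new tx_cons or the
	 * stopped queue state and the wake-up below cannot be lost.
	 */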
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6742 * members of the RX descriptor are invariant. See notes above
6743 * tg3_alloc_rx_data for full details.
6745 static void tg3_recycle_rx(struct tg3_napi
*tnapi
,
6746 struct tg3_rx_prodring_set
*dpr
,
6747 u32 opaque_key
, int src_idx
,
6748 u32 dest_idx_unmasked
)
6750 struct tg3
*tp
= tnapi
->tp
;
6751 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
6752 struct ring_info
*src_map
, *dest_map
;
6753 struct tg3_rx_prodring_set
*spr
= &tp
->napi
[0].prodring
;
6756 switch (opaque_key
) {
6757 case RXD_OPAQUE_RING_STD
:
6758 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
6759 dest_desc
= &dpr
->rx_std
[dest_idx
];
6760 dest_map
= &dpr
->rx_std_buffers
[dest_idx
];
6761 src_desc
= &spr
->rx_std
[src_idx
];
6762 src_map
= &spr
->rx_std_buffers
[src_idx
];
6765 case RXD_OPAQUE_RING_JUMBO
:
6766 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
6767 dest_desc
= &dpr
->rx_jmb
[dest_idx
].std
;
6768 dest_map
= &dpr
->rx_jmb_buffers
[dest_idx
];
6769 src_desc
= &spr
->rx_jmb
[src_idx
].std
;
6770 src_map
= &spr
->rx_jmb_buffers
[src_idx
];
6777 dest_map
->data
= src_map
->data
;
6778 dma_unmap_addr_set(dest_map
, mapping
,
6779 dma_unmap_addr(src_map
, mapping
));
6780 dest_desc
->addr_hi
= src_desc
->addr_hi
;
6781 dest_desc
->addr_lo
= src_desc
->addr_lo
;
6783 /* Ensure that the update to the skb happens after the physical
6784 * addresses have been transferred to the new BD location.
6788 src_map
->data
= NULL
;
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
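/* Illustrative sketch (added; not in the original source): in steady state
 * the per-packet flow is
 *
 *   host: tg3_alloc_rx_data() posts a buffer address on the std/jumbo
 *         producer ring
 *   chip: DMAs the frame, then writes length/flags plus the opaque cookie
 *         into the next status ring entry
 *   host: tg3_rx() reads the status entry, unmaps the buffer, reposts a
 *         fresh one at the producer index, and bumps the consumer mailbox
 *
 * so each side only ever writes rings it owns, exactly the cache-coherency
 * property the comment above argues for.
 */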
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
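/* Worked example of the copy-count logic above (values illustrative):
 * with rx_std_ring_mask = 511, spr->rx_std_cons_idx = 500 and
 * src_prod_idx = 10, the consumer is numerically ahead of the producer,
 * so cpycnt = 512 - 500 = 12 entries are copied up to the wrap point;
 * the next pass of the loop starts from index 0 and copies the
 * remaining 10.  The min() against the destination ring keeps the copy
 * from overrunning dpr's free space.
 */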
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
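/* The mailbox write in the completion path above encodes the last
 * processed status tag in bits 31:24.  A sketch with a hypothetical
 * value: if sblk->status_tag was 0x5e when the block was snapshotted,
 * the driver writes 0x5e << 24 to int_mbox; the chip compares that
 * against its current tag and only raises a new interrupt if more work
 * arrived after the snapshot.
 */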
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
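/* Typical pairing, sketched from how callers elsewhere in this file use
 * it (illustrative, not a verbatim caller): pass irq_sync == 1 when the
 * IRQ handlers must be drained too, e.g. around a device shutdown:
 *
 *	tg3_full_lock(tp, 1);
 *	...halt or reconfigure the chip...
 *	tg3_full_unlock(tp);
 *
 * Fast paths pass irq_sync == 0 and only take tp->lock.
 */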
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}
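/* Worked example (illustrative values): with mapping = 0xfffff000 and
 * len = 0x2000, the 32-bit sum base + len + 8 = 0x100001008 truncates
 * to 0x1008, which is < base, so the test reports an overflow: the
 * buffer (plus the 8-byte slack the DMA engine may read) straddles a
 * 4GB boundary.
 */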
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
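/* Worked example (illustrative values): mapping = 0xfffffffc00 with
 * len = 0x800 gives 0x10000000400, which exceeds DMA_BIT_MASK(40)
 * (0xffffffffff), so the buffer must be bounced on chips with the
 * 40-bit DMA bug.
 */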
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
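/* Worked example of the packing above (illustrative values): for
 * mapping = 0x123456789, len = 1514 and flags = TXD_FLAG_END, the BD
 * gets addr_hi = 0x1, addr_lo = 0x23456789 and len_flags =
 * (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END; mss and vlan land in the
 * two halves of vlan_tag the same way.
 */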
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}
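/* Worked example (illustrative values): with tx_pending = 511
 * descriptors, the GSO fallback is only attempted when
 * gso_segs < 511 / 3 = 170.  A packet that would segment into more
 * pieces than a third of the ring could never be queued, since each
 * segment can consume several descriptors in the worst case, so the
 * caller drops it instead of retrying forever.
 */

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}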
/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u32 tcp_opt_len, hdr_len;

		if (skb_cow_head(skb, 0))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		/* HW/FW can not correctly segment packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (tg3_tso_bug_gso_check(tnapi, skb))
				return tg3_tso_bug(tp, tnapi, txq, skb);
			goto drop;
		}

		if (!skb_is_gso_v6(skb)) {
			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			    tg3_flag(tp, TSO_BUG)) {
				if (tg3_tso_bug_gso_check(tnapi, skb))
					return tg3_tso_bug(tp, tnapi, txq, skb);
				goto drop;
			}
			ip_csum = iph->check;
			ip_tot_len = iph->tot_len;
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcph = tcp_hdr(skb);
		tcp_csum = tcph->check;

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcph->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else {
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		}

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW can not correctly checksum packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (skb_checksum_help(skb))
				goto drop;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (skb_vlan_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = skb_vlan_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card. */
		tw32_tx_mbox(tnapi->prodmbox, entry);
		mmiowb();
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_consume_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* tp->hw_stats can be referenced safely:
	 *     1. under rtnl_lock
	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |=
					TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
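
/* Illustrative note (not from the original driver): tg3_set_bdinfo()
 * splits a dma_addr_t across two 32-bit SRAM words.  A hypothetical
 * mapping of 0x0000000123456780 would be stored as:
 *
 *	high word: (u64) mapping >> 32		== 0x00000001
 *	low word:  (u64) mapping & 0xffffffff	== 0x23456780
 *
 * so the NIC can locate descriptor rings above 4GB on 64-bit capable
 * hosts.
 */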
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
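
/* Illustrative note (not from the original driver): the per-vector host
 * coalescing registers are laid out at a fixed 0x18-byte stride, so for
 * vector index i the writes above resolve to, e.g.:
 *
 *	i == 0: HOSTCC_TXCOL_TICKS_VEC1 + 0x00
 *	i == 1: HOSTCC_TXCOL_TICKS_VEC1 + 0x18
 *	i == 2: HOSTCC_TXCOL_TICKS_VEC1 + 0x30
 *
 * tg3_coal_rx_init() below walks the RX mirror of the same layout.
 */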
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
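
/* Illustrative note (not from the original driver): with assumed values
 * bdcache_maxcnt == 8, rx_std_max_post == 32 and rx_pending == 200, the
 * math above gives:
 *
 *	nic_rep_thresh	  = min(8 / 2, 32)	== 4
 *	host_rep_thresh	  = max(200 / 8, 1)	== 25
 *	RCVBDI_STD_THRESH = min(4, 25)		== 4
 *
 * i.e. the lower of the NIC-side and host-side replenish thresholds wins.
 */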
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}
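
/* Illustrative note (not from the original driver): calc_crc() is the
 * standard bit-reversed CRC-32 (polynomial CRC32_POLY_LE) over the MAC
 * address.  __tg3_set_rx_mode() below folds the result into a 7-bit hash:
 *
 *	u32 crc = calc_crc(ha->addr, ETH_ALEN);
 *	u32 bit = ~crc & 0x7f;            // 0..127
 *	u32 regidx = (bit & 0x60) >> 5;   // one of MAC_HASH_REG_0..3
 *	bit &= 0x1f;                      // bit within that register
 *
 * giving a 128-bit multicast hash filter spread over four registers.
 */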
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
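
/* Illustrative note (not from the original driver):
 * ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so with
 * qcnt == 4 the default table cycles 0, 1, 2, 3, 0, 1, ... across all
 * TG3_RSS_INDIR_TBL_SIZE entries, spreading flows evenly over the
 * enabled receive queues.
 */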
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one of the AMD platform, MRRS is restricted to 4000 because of
	 * south bridge limitation. As a workaround, Driver is setting MRRS
	 * to 2048 instead of default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}
	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10 ; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
	}
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_5SEC);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature * 1000);
}
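
/* Illustrative note (not from the original driver): the APE scratchpad
 * reports whole degrees Celsius, while the hwmon sysfs ABI expects
 * millidegrees; hence the "temperature * 1000" above (45 C reads back
 * as "45000").
 */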
static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);

static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#else
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
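
/* Illustrative note (not from the original driver): the hardware counters
 * read by TG3_STAT_ADD32 are only 32 bits wide, so the macro detects
 * unsigned wraparound to maintain a 64-bit software counter.  E.g. with
 * low == 0xffffff00 and __val == 0x200, low wraps to 0x100; since
 * 0x100 < 0x200 the carry is detected and high is incremented.
 */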
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up || tg3_flag(tp, RESET_TASK_PENDING))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	/* Update the APE heartbeat every 5 seconds.*/
	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	timer_setup(&tp->timer, tg3_timer, 0);
}
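
/* Illustrative note (not from the original driver): with an assumed
 * HZ == 1000 and tagged status supported, timer_offset == HZ, so
 * tg3_timer() fires once a second and timer_multiplier == 1; on chips
 * needing the missed-MSI check, timer_offset == HZ / 10 gives a 100ms
 * tick with timer_multiplier == 10, so the once-per-second block in
 * tg3_timer() still runs every tenth invocation.
 */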
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
	rtnl_unlock();
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
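
/* For illustration only: on a device named eth0 with three MSI-X vectors,
 * the names registered above typically appear in /proc/interrupts as
 * "eth0-0" for the link/misc vector and "eth0-txrx-1", "eth0-txrx-2"
 * (or the -rx-/-tx- variants) for the ring vectors, depending on which
 * rings each tnapi carries.  The device name here is an assumption.
 */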
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
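
/* The MSI test in short: SERR is masked so a dead MSI cycle cannot
 * escalate into a fatal system error, a test interrupt is fired via
 * tg3_test_interrupt(), and only an -EIO result triggers the fallback
 * to legacy INTx, which then requires a chip reset because the MSI
 * transaction may have ended with a Master Abort.  Other errors are
 * passed straight up to the caller.
 */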
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
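
/* Worked example: rxq_cnt = 4 and txq_cnt = 1 gives
 * irq_cnt = min(4 + 1, irq_max), i.e. four ring vectors plus the extra
 * vector 0 that services link and other non-ring interrupts in
 * multiqueue MSI-X mode.
 */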
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
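
/* If pci_enable_msix_range() grants fewer vectors than requested, the
 * rx ring count is trimmed to (vectors - 1), reserving vector 0 for
 * link events.  For example (numbers illustrative): a request for 5
 * vectors that yields 3 runs with rxq_cnt = 2 and txq_cnt capped at
 * min(2, txq_max).
 */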
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				struct tg3_napi *tnapi = &tp->napi[i];

				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];

		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];

		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to close device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	tg3_stop(tp);

	if (pci_device_is_present(tp->pdev)) {
		tg3_power_down_prepare(tp);

		tg3_carrier_off(tp);
	}
	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
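
/* Example: a hardware counter with high = 0x1 and low = 0x5 reads back
 * as get_stat64() == 0x100000005.
 */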
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)
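
/* ESTAT_ADD(rx_octets), for instance, expands to:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool counter is the snapshot saved before the last reset
 * plus the live hardware counter.
 */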
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					      CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}
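
/* Alignment example for the read path above: a request with offset = 1
 * and len = 2 is not 4-byte aligned, so b_offset = 1 and b_count is
 * clamped from 3 down to len = 2; one aligned word is read at offset 0
 * and bytes 1..2 of it are copied out before the main loop runs.
 */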
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start = 0, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 supported, advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		phy_ethtool_ksettings_get(phydev, cmd);

		return 0;
	}

	supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		supported |= (SUPPORTED_1000baseT_Half |
			      SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		supported |= (SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_TP);
		cmd->base.port = PORT_TP;
	} else {
		supported |= SUPPORTED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				advertising |= ADVERTISED_Pause;
			} else {
				advertising |= ADVERTISED_Pause |
					ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			advertising |= ADVERTISED_Asym_Pause;
		}
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	if (netif_running(dev) && tp->link_up) {
		cmd->base.speed = tp->link_config.active_speed;
		cmd->base.duplex = tp->link_config.active_duplex;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising,
			tp->link_config.rmt_adv);

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
	}

	cmd->base.phy_address = tp->phy_addr;
	cmd->base.autoneg = tp->link_config.autoneg;
	return 0;
}
static int tg3_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_set(phydev, cmd);
	}

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    cmd->base.duplex != DUPLEX_FULL &&
	    cmd->base.duplex != DUPLEX_HALF)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->base.duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->base.autoneg;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->base.duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;
	bool reset_phy = false;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Reset PHY to avoid PHY lock up */
		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			reset_phy = true;

		err = tg3_restart_hw(tp, reset_phy);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;
	bool reset_phy = false;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

		if (!phy_validate_pause(phydev, epause))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			}
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		}

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			if (phydev->autoneg) {
				/* phy_set_asym_pause() will
				 * renegotiate the link to inform our
				 * link partner of our flow control
				 * settings, even if the flow control
				 * is forced.  Let tg3_adjust_link()
				 * do the final flow control setup.
				 */
				return 0;
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			/* Reset PHY to avoid PHY lock up */
			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
			    tg3_asic_rev(tp) == ASIC_REV_5720)
				reset_phy = true;

			err = tg3_restart_hw(tp, reset_phy);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}
static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}
static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
			const u8 hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
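
/* From user space this path is exercised with something like
 * "ethtool -X eth0 equal 4" (spread rx evenly over 4 rings); the device
 * name and ring count are only illustrative.
 */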
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
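
/* This backs "ethtool -p" (port identification): returning 1 for
 * ETHTOOL_ID_ACTIVE asks the ethtool core to call back with ID_ON/ID_OFF
 * once per second, and ID_INACTIVE restores the saved tp->led_ctrl value.
 */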
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		err = 0;
		if (csum8)
			err = -EIO;

		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				j += 2;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				j += 3;
			} else {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE,		TG3_FL_NOT_5705, 0x00000000, 0x00ef6f8c },
		{ MAC_MODE,		TG3_FL_5705,	 0x00000000, 0x01ef6b8c },
		{ MAC_STATUS,		TG3_FL_NOT_5705, 0x03800107, 0x00000000 },
		{ MAC_STATUS,		TG3_FL_5705,	 0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH,	0x0000,		 0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW,	0x0000,		 0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE,	0x0000,		 0x00000000, 0x0000ffff },
		{ MAC_TX_MODE,		0x0000,		 0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS,	0x0000,		 0x00000000, 0x00003fff },
		{ MAC_RX_MODE,		TG3_FL_NOT_5705, 0x00000000, 0x000007fc },
		{ MAC_RX_MODE,		TG3_FL_5705,	 0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0,	0x0000,		 0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1,	0x0000,		 0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2,	0x0000,		 0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3,	0x0000,		 0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0,	TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4,	TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8,	TG3_FL_NOT_5705, 0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc,	TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0,	0x0000,		 0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4,	0x0000,		 0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8,	0x0000,		 0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc,	0x0000,		 0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH,	TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH,	TG3_FL_5705,	 0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH,	TG3_FL_NOT_5705, 0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE,		TG3_FL_NOT_5705, 0x00000000, 0x00000004 },
		{ HOSTCC_MODE,		TG3_FL_5705,	 0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS,	TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS,	TG3_FL_5705,	 0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS,	TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS,	TG3_FL_5705,	 0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES,	TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES,	TG3_FL_5705 | TG3_FL_NOT_5788,
					0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES,	TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES,	TG3_FL_5705 | TG3_FL_NOT_5788,
					0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
					0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
					0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
					0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
					0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR,	TG3_FL_NOT_5750, 0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE,	TG3_FL_NOT_5750, 0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,	 0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,	 0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER,	0x0000,		 0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
					0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
					0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,	 0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
					0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,	 0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,	 0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
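
/* The three patterns cover the classic stuck-at cases: all zeros, all
 * ones, and the alternating value 0xaa55a55a, which helps catch shorted
 * or coupled data lines.  Every word of the region is written and read
 * back before moving on to the next pattern.
 */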
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
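
/* Reading the canned header above byte by byte: 0x08 0x00 is the IPv4
 * ethertype, 0x45 encodes IP version 4 with a 20-byte header, protocol
 * 0x06 is TCP, and the addresses are 10.0.0.1 -> 10.0.0.2.  The 12 bytes
 * of TCP options (0x01 0x01 0x08 0x0a ...) are NOP, NOP and a timestamp
 * option, which is what makes the lengths line up with
 * TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + TG3_TSO_TCP_OPT_LEN.
 */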
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
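
/* Each u64 test result below is a bitmask of these flags; e.g. a value
 * of 3 (TG3_STD_LOOPBACK_FAILED | TG3_JMB_LOOPBACK_FAILED) means both
 * the standard and jumbo frame loopbacks failed for that mode, while
 * TG3_LOOPBACK_FAILED (7) marks the mode as wholly failed, e.g. when
 * the interface is down.
 */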
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);
}
static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
		tg3_flag_set(tp, TX_TSTAMP_EN);
	else
		tg3_flag_clear(tp, TX_TSTAMP_EN);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
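/* Each rx_filter case above programs a distinct bit combination into
 * TG3_RX_PTP_CTL, which is what lets tg3_hwtstamp_get() below recover
 * the active filter by switching on tp->rxptpctl.
 */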
static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	stmpconf.flags = 0;
	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

	switch (tp->rxptpctl) {
	case 0:
		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fall through */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
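/* Note that the MII register paths above take tp->lock around
 * __tg3_readphy()/__tg3_writephy(), serializing user-space MII access
 * with the driver's own PHY accesses.
 */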
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
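/* Only pre-5705 hardware has the per-IRQ tick and statistics
 * coalescing knobs; on 5705+ parts the limits above stay 0, so the
 * range check rejects any nonzero *_usecs_irq or
 * stats_block_coalesce_usecs request.
 */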
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}

static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
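/* On 5780-class parts jumbo frames and TSO are mutually exclusive, so
 * the helper above toggles TSO_CAPABLE (and re-evaluates the netdev
 * feature set via netdev_update_features()) whenever the MTU crosses
 * ETH_DATA_LEN.
 */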
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
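/* Worked example of the sizing loop above: reads go out at offsets
 * 0x10, 0x20, 0x40, ... until one returns the magic signature again,
 * meaning the address wrapped and cursize equals the real part size.
 */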
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
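/* Illustrative values: if the halfword at 0xf2 holds 0x0200 (512, for
 * a 512 KB part), the byteswapped register read yields 0x0002 in the
 * low 16 bits and swab16() restores 0x0200 before the multiply by
 * 1024.
 */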
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
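/* The 264- and 528-byte page sizes above identify Atmel DataFlash
 * parts; callers test for exactly these two values when deciding
 * whether to set NO_NVRAM_ADDR_TRANS for NVRAM address translation.
 */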
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
						AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
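/* A NULL return means the board is not in the table; tg3_phy_probe()
 * treats that as "no hardcoded PHY ID" and falls back to its other
 * probing methods.
 */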
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
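			/* The two SRAM halves mirror the MII PHYSID1 and
			 * PHYSID2 registers; the shifts above rebuild them
			 * into the same internal phy_id layout that
			 * tg3_phy_probe() derives from live MII reads.
			 */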
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
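/* The gphy config word straddles two OTP locations: its top half lands
 * in the low 16 bits of the MAGIC1 read and its bottom half in the
 * high 16 bits of the MAGIC2 read, hence the shift-and-merge above.
 */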
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
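/* Worked example for the suffix encoding above (comment only): a
 * selfboot image with major 1, minor 10, build 2 yields " v1.10" plus
 * 'a' + 2 - 1 = 'b', i.e. "sb v1.10b".  The minor > 99 / build > 26
 * guard keeps the two-digit format and the single letter ('a'..'z')
 * in range.
 */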
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
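/* Hedged sketch (comment only) of how MBOX_WRITE_REORDER is consumed:
 * mailbox writes are routed through a flushing accessor that reads the
 * register back, forcing the host bridge to post writes in order,
 * roughly:
 *
 *	static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
 *	{
 *		writel(val, tp->regs + off);
 *		readl(tp->regs + off);	// flush the posted write
 *	}
 *
 * The real accessor is defined earlier in this file; this restates it
 * only to motivate the chipset list above.
 */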
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
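/* Worked example for the devfn math above (comment only): devfn packs
 * slot and function as (slot << 3) | func, so devfn & ~7 selects
 * function 0 of the same slot.  For devfn 0x21 (slot 4, function 1),
 * devnr is 0x20 and the loop probes 0x20..0x27 for the other port.
 */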
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
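/* Worked example (comment only, assuming the usual tg3.h encoding) for
 * the revision helpers used throughout: tp->pci_chip_rev_id packs the
 * ASIC revision in the upper nibbles and the stepping in the low byte.
 * For a hypothetical value 0x7100:
 *
 *	tg3_asic_rev(tp)    == 0x7100 >> 12 == 0x07  (ASIC_REV_5700)
 *	tg3_chip_rev(tp)    == 0x7100 >> 8  == 0x71  (CHIPREV_5700_BX)
 *	tg3_chip_rev_id(tp) == 0x7100        (CHIPREV_ID_5700_B0)
 */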
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
		tp->ape_hb_interval =
			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

	if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0]))
		return -EINVAL;
	return 0;
}
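/* Note on the 0x484b check above (comment only): bootcode stores the
 * ASCII signature "HK" (0x48 0x4b) in the top half of the MAC-address
 * high mailbox word, so hi might read e.g. 0x484b0010 for an address
 * starting 00:10:..., with the remaining four octets in the low word.
 */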
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
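/* Worked example for the cacheline math above (comment only):
 * PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so a raw value of
 * 16 means 16 * 4 = 64 bytes.  A raw 0 (BIOS left it unprogrammed) is
 * treated as the 1024-byte worst case so the most conservative DMA
 * boundary is chosen.
 */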
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
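/* Hedged sketch (comment only) of the descriptor staging above: the
 * test descriptor is built in host memory but consumed from NIC SRAM,
 * so each 32-bit word is copied through the PCI memory window:
 *
 *	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, sram_off);
 *	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_DATA, word);
 *
 * The window is then pointed back at offset 0, presumably so later
 * SRAM accesses start from a known base.
 */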
#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
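/* Note on the test pattern above (comment only): the buffer is filled
 * with p[i] = i, DMA'd to the chip and back, then compared element by
 * element.  An ascending counter makes a dropped or reordered cache
 * line show up as p[i] != i at the first corrupted word.
 */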
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
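
/* Illustrative note (an addition, not original driver text): the defaults
 * set above are the values later reported and tuned through the standard
 * ethtool coalescing interface, e.g.:
 *
 *   ethtool -c eth0                            # show current settings
 *   ethtool -C eth0 rx-usecs 50 rx-frames 10   # adjust rx coalescing
 *
 * where "eth0" stands for whatever name the kernel assigned this NIC.
 */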
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
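
	/* Illustrative sketch (an addition, not original driver text): byte
	 * swapping reverses the four bytes of a 32-bit access (0x11223344
	 * is seen as 0x44332211 on the other side), while word swapping
	 * exchanges the two 32-bit halves of a 64-bit quantity. The
	 * GRC_MODE bits above select the combination that lets the
	 * big-endian on-chip CPUs and a little- or big-endian host agree
	 * on descriptor layout.
	 */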

	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
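
	/* Illustrative note (an addition, not original driver text): newer
	 * kernels deprecate the pci_set_dma_mask() /
	 * pci_set_consistent_dma_mask() pair used above in favor of the
	 * generic DMA API, e.g.:
	 *
	 *   err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	 *
	 * which sets both the streaming and coherent masks in one call and
	 * returns a single error code for the caller to handle.
	 */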

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
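
	/* Illustrative note (an addition, not original driver text): as the
	 * comment above says, the offload can be toggled from userspace
	 * through the standard ethtool interface, e.g.:
	 *
	 *   ethtool -k eth0 | grep tcp-segmentation-offload   # query
	 *   ethtool -K eth0 tso on                            # enable
	 *
	 * with "eth0" standing in for the interface name.
	 */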

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);
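
	/* Illustrative note (an addition, not original driver text): within
	 * the bounds published above, the MTU is changed with the usual
	 * tools, e.g.:
	 *
	 *   ip link set dev eth0 mtu 9000   # jumbo-capable chips only
	 *
	 * Requests outside [dev->min_mtu, dev->max_mtu] are rejected by the
	 * networking core before the driver ever sees them.
	 */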

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
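
	/* Illustrative note (an addition, not original driver text): with
	 * MSI-X enabled, vector 0 ends up carrying only link-change
	 * interrupts while the remaining vectors service the rx return
	 * rings and tx completions; in single-vector (INTx/MSI) mode the
	 * loop breaks after i == 0 and that first mailbox set drives all
	 * traffic.
	 */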

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down
	 * cleanly.  The DMA self test will enable WDMAC and we'll see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);

		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
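
/* Illustrative note (an addition, not original driver text):
 * SIMPLE_DEV_PM_OPS() defines a struct dev_pm_ops in which the
 * system-sleep callbacks (suspend/resume, freeze/thaw, poweroff/restore)
 * all map onto the two handlers above; runtime PM callbacks are left
 * unset.
 */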

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
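
/* Illustrative note (an addition, not original driver text): the PCI AER
 * core drives the three callbacks above in sequence after a bus error:
 * error_detected() quiesces the device and picks a recovery strategy,
 * slot_reset() re-initializes config space after the link reset, and
 * resume() restarts traffic once recovery succeeds.
 */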

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
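
/* Illustrative note (an addition, not original driver text):
 * module_pci_driver() generates the module_init()/module_exit()
 * boilerplate, expanding to roughly:
 *
 *   static int __init tg3_driver_init(void)
 *   {
 *           return pci_register_driver(&tg3_driver);
 *   }
 *   module_init(tg3_driver_init);
 *
 *   static void __exit tg3_driver_exit(void)
 *   {
 *           pci_unregister_driver(&tg3_driver);
 *   }
 *   module_exit(tg3_driver_exit);
 */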