/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
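/* Editorial note (not in the original source): the helpers above give
 * type-checked access to the tg3_flags bitmap.  The macros paste the
 * short flag name onto the TG3_FLAG_ prefix, so for example
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an
 * atomic test_bit() on the device's flag words.
 */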
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
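/* Editorial note (illustrative, not in the original source): because
 * TG3_TX_RING_SIZE is a power of two, NEXT_TX() wraps the producer
 * index with a mask rather than a modulo, exactly as the comment above
 * suggests: NEXT_TX(511) == (512 & 511) == 0.
 */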
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
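/* Editorial note (not in the original source): entries above carry the
 * TG3_DRV_DATA_FLAG_* hints to the probe path via the .driver_data
 * field of the struct pci_device_id passed to the probe routine, and
 * the empty { } entry terminates the table as the PCI core requires.
 */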
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
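/* Editorial note (not in the original source): the indirect accessors
 * above implement a classic address/data window.  A write to
 * TG3PCI_REG_BASE_ADDR in PCI config space selects the target register
 * and TG3PCI_REG_DATA moves the value, with indirect_lock serializing
 * the paired config cycles against concurrent users.
 */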
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
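/* Editorial note (illustrative, not in the original source): tw32_f()
 * forces a posted MMIO write to complete by reading the register back,
 * while tw32_wait_f() additionally delays for registers that are unsafe
 * to read back immediately, e.g.:
 *
 *	tw32_f(GRC_RX_CPU_EVENT, val);			flush only
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);	flush + 40 usec
 */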
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
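/* Editorial note (not in the original source): tg3_write_mem() and
 * tg3_read_mem() reuse the same windowing idea for NIC SRAM.  The 5906
 * check skips the statistics block window (NIC_SRAM_STATS_BLK up to
 * NIC_SRAM_TX_BUFFER_DESC), which that chip does not expose; reads in
 * that range are reported as zero instead.
 */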
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off, ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
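/* Editorial note (not in the original source): tg3_ape_event_lock()
 * polls in 10 usec steps under TG3_APE_LOCK_MEM, so a caller such as
 * tg3_ape_event_lock(tp, 1000) gives the APE firmware up to ~1 msec to
 * clear APE_EVENT_STATUS_EVENT_PENDING before giving up with -EBUSY.
 */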
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);

			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif /* CONFIG_TIGON3_HWMON */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if hb interval has exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
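/* Editorial note (editorial reading, not in the original source): each
 * vector acks interrupts by writing last_tag << 24 into its mailbox;
 * the duplicated mailbox write under 1SHOT_MSI re-arms one-shot MSI
 * vectors, and coal_now accumulates the per-vector coalescing bits
 * later used to kick HOSTCC_MODE.
 */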
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable()
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
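/* Editorial note (not in the original source): with PHY_BUSY_LOOPS set
 * to 5000 and the 10 usec poll step reconstructed above, a stuck MDIO
 * transaction times out after roughly 50 msec and returns -EBUSY.
 */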
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
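/* Editorial note (not in the original source): the DSP helpers above
 * are an address/data pair at the PHY level: the target is written to
 * MII_TG3_DSP_ADDRESS and the data moves through MII_TG3_DSP_RW_PORT,
 * mirroring the register-window pattern used for chip registers.
 */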
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK  |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
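/* Editorial note (not in the original source): the message posted above
 * is FWCMD_NICDRV_LINK_UPDATE with a payload length of 14 bytes and
 * four 32-bit words of PHY state (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, PHYADDR) gathered by tg3_phy_gather_ump_data().
 */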
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
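/* Editorial note (not in the original source): the 1000BASE-X pause
 * resolution above follows the 802.3 priority rules: symmetric PAUSE on
 * both ends enables both directions, otherwise ASYM_PAUSE selects one.
 * As a truth table (local, remote -> result): PAUSE,PAUSE -> TX|RX;
 * PAUSE|ASYM,ASYM -> RX; ASYM,PAUSE|ASYM -> TX; anything else -> none.
 */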
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
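/* Illustration of the mask/shift pattern above (values hypothetical,
 * not the real TG3_OTP_* constants): each OTP parameter is isolated
 * with a mask and moved down to bit 0 before any register-specific
 * default bits are OR-ed in, e.g. with otp = 0x00a50000, a mask of
 * 0x00ff0000 and a shift of 16:
 *
 *   phy = (0x00a50000 & 0x00ff0000) >> 16 = 0x00a5
 */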
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return 0;
}
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
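/* Layout note: the GPIO message word carries one two-bit status field
 * per PCI function, packed at a four-bit stride (shifts 0, 4, 8, 12).
 * For example, function 2 reporting both DRVR_PRES and NEED_VAUX
 * contributes TG3_GPIO_MSG_MASK << 8, and the ALL_*_MASK macros above
 * test a given bit across all four functions at once.
 */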
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}
static bool tg3_phy_led_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
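/* Timing note: with NVRAM_CMD_TIMEOUT at 10000 iterations and
 * usleep_range(10, 40) per iteration, the polling loop above waits
 * roughly 100ms to 400ms for NVRAM_CMD_DONE before returning -EBUSY.
 */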
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);

	if (!res)
		*val = cpu_to_be32(v);

	return res;
}
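/* Example of the distinction: if NVRAM holds the bytes a0 a1 a2 a3,
 * tg3_nvram_read() returns them in register order (so a little-endian
 * host sees the 32-bit value byteswapped), whereas
 * tg3_nvram_read_be32() always returns the bytestream a0 a1 a2 a3 as
 * a __be32, which is the form the block-write helpers below expect in
 * their u8 *buf arguments.
 */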
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments. Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length. Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
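/* Sketch of the two layouts described above (fragment sizes N1, N2 are
 * illustrative, not taken from a real blob):
 *
 *   non-fragmented: [hdr: ver, base, len = bss][data ... tp->fw->size]
 *   fragmented:     [main hdr, len = 0xffffffff]
 *                   [frag hdr, len = N1][N1 - TG3_FW_HDR_LEN of data]
 *                   [frag hdr, len = N2][N2 - TG3_FW_HDR_LEN of data] ...
 */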
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
{
	u32 addr_high, addr_low;

	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
		    (mac_addr[4] <<  8) | mac_addr[5]);

	if (index < 4) {
		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
	} else {
		index -= 4;
		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
	}
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high;
	int i;

	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 4; i < 16; i++)
			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
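/* Example (hypothetical address): for MAC 00:10:18:aa:bb:cc,
 * __tg3_set_one_mac_addr() writes addr_high = 0x0010 and
 * addr_low = 0x18aabbcc, and the backoff seed above is the byte sum
 * 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259, masked with
 * TX_BACKOFF_SEED_MASK before being written.
 */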
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_setup_phy(struct tg3 *, bool);

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
			struct phy_device *phydev;
			u32 phyid;

			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			ethtool_convert_link_mode_to_legacy_u32(
				&tp->link_config.advertising,
				phydev->advertising);

			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
					 advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
					 advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
					 advertising);

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB)) {
					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
							 advertising);
					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
							 advertising);
					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
							 advertising);
				} else {
					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
							 advertising);
				}
			}

			linkmode_copy(phydev->advertising, advertising);
			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, false);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
	struct ethtool_eee eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return true;

	tg3_eee_pull_config(tp, &eee);

	if (tp->eee.eee_enabled) {
		if (tp->eee.advertised != eee.advertised ||
		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
			return false;
	} else {
		/* EEE is disabled but we're advertising */
		if (eee.advertised)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			netif_carrier_on(tp->dev);
		} else {
			netif_carrier_off(tp->dev);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u32 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset.  If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
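/* Overview (derived from the state machine below, not in the original
 * source): a clean negotiation walks AN_ENABLE -> RESTART_INIT -> RESTART
 * -> ABILITY_DETECT_INIT -> ABILITY_DETECT -> ACK_DETECT_INIT ->
 * ACK_DETECT -> COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 * IDLE_DETECT -> LINK_OK.  The timed states return ANEG_TIMER_ENAB until
 * ANEG_STATE_SETTLE_TIME ticks have elapsed, and any mismatch against the
 * remembered ability_match_cfg drops the machine back to AN_ENABLE.
 */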
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fall through */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fall through */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fall through */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u32 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u32 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	    !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	    (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
{
	u64 stamp;

	ptp_read_system_prets(sts);
	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	ptp_read_system_postts(sts);
	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;

	return stamp;
}
/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
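/* Design note on tg3_refclk_write() above (editorial comment, not in the
 * original source): the STOP/RESUME bracket is what makes the two 32-bit
 * halves of the 64-bit load look atomic to the hardware.  The counter is
 * frozen before the LSB and MSB registers are written and only resumes
 * once both halves are in place, so no sampler can observe a half-updated
 * value.
 */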
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					 SOF_TIMESTAMPING_RX_HARDWARE |
					 SOF_TIMESTAMPING_RAW_HARDWARE;
	}

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
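/* Worked example of the conversion above (illustrative numbers only,
 * editorial comment): ppb = 5000 gives
 * correction = 5000 * (1 << 24) / 1000000000 = 83 (truncated from 83.886),
 * so the achieved rate is 83 / 2^24 ~= 4947 ppb.  The programmable
 * granularity is therefore 10^9 / 2^24 ~= 59.6 ppb per correction step.
 */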
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			    struct ptp_system_timestamp *sts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp, sts);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	*ts = ns_to_timespec64(ns);

	return 0;
}
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec64_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		/* Reject requests with unsupported flags */
		if (rq->perout.flags)
			return -EOPNOTSUPP;

		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
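/* Illustrative one-shot request for the PEROUT path above (editorial
 * comment; the values are hypothetical): start.sec = 5 and
 * start.nsec = 500000000 give nsec = 5500000000 = 0x147d35700, so
 * WATCHDOG0_LSB is loaded with 0x47d35700 and WATCHDOG0_MSB with
 * TG3_EAV_WATCHDOG0_EN | 0x1.  A non-zero period or a start value with
 * bit 63 set is rejected, as the warnings above describe.
 */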
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettimex64	= tg3_ptp_gettimex,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}
/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}
static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
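/* Worked example of the ring arithmetic above (editorial comment with
 * illustrative indices, assuming the driver's power-of-two
 * TG3_TX_RING_SIZE of 512): with tx_prod = 10 and tx_cons = 508 the
 * unsigned subtraction wraps, and (10 - 508) & 511 = 14 descriptors are
 * still in flight, so tg3_tx_avail() returns tx_pending - 14.  The mask
 * trick requires the ring size to be a power of two.
 */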
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_consume_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = napi_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u64 tstamp = 0;
		u8 *data;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
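
/* Worked example (illustration only): with a 512-entry standard ring
 * (rx_std_ring_mask == 511), cons_idx == 500 and src_prod_idx == 10, the
 * producer has wrapped.  The first pass copies 512 - 500 = 12 entries
 * (slots 500..511) and advances cons_idx to 0; the next pass sees
 * 0 < 10 and copies the remaining 10.  cpycnt is additionally clamped
 * by the contiguous free space in front of the destination producer.
 */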
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
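
/* Note: the test_and_set_bit() in tg3_reset_task_schedule() makes the
 * scheduling idempotent -- however many error paths request a reset
 * before the work runs, it is queued at most once.  The reset worker is
 * then expected to clear RESET_TASK_PENDING when it finishes.
 */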
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
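
/* Note on the interrupt mailbox write above: the 8-bit status tag lands
 * in bits 31:24 of the mailbox (last_tag << 24).  In tagged-status mode
 * the chip compares the acknowledged tag with the current status block
 * tag and only raises a new interrupt if more work arrived after the
 * point we acknowledged -- which is why last_tag must be read before
 * the final "more work" check above.
 */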
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
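
/* Usage sketch (illustration only): a configuration path that must also
 * fence out the interrupt handlers passes irq_sync != 0:
 *
 *	tg3_full_lock(tp, 1);	// takes tp->lock, then quiesces IRQs
 *	...reprogram the chip...
 *	tg3_full_unlock(tp);
 *
 * Fast paths that only need mutual exclusion pass irq_sync == 0.
 */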
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}

/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
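
/* Worked example (illustration only): with mapping == 0xfffff000 and
 * len == 0x2000, base + len + 8 is 0x100001008, which truncates to
 * 0x1008 in 32 bits -- less than base, so the buffer crosses a 4GB
 * boundary and must be worked around.  The "+ 8" leaves a small margin
 * so buffers ending right at the boundary are rejected as well.
 */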
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
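
/* Worked example (illustration only): for a hypothetical mapping of
 * 0x123456780 and len 1514, the descriptor ends up as
 *   addr_hi   = 0x00000001, addr_lo = 0x23456780,
 *   len_flags = (1514 << TXD_LEN_SHIFT) | (flags & 0xffff),
 * with mss and vlan packed the same way into vlan_tag.
 */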
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}
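
/* Worked example (illustration only): with tx_pending == 511 (the
 * driver default), a TSO skb is eligible for the GSO fallback only
 * when gso_segs < 511 / 3 = 170.  tg3_tso_bug() budgets a worst case
 * of 3 descriptors per segment (frag_cnt_est = gso_segs * 3), so at
 * most 507 descriptors are needed and the ring can always drain.
 */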
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
	struct sk_buff *segs, *seg, *next;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);
		tg3_start_xmit(seg, tp->dev);
	}

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u32 tcp_opt_len, hdr_len;

		if (skb_cow_head(skb, 0))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		/* HW/FW can not correctly segment packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (tg3_tso_bug_gso_check(tnapi, skb))
				return tg3_tso_bug(tp, tnapi, txq, skb);
			goto drop;
		}

		if (!skb_is_gso_v6(skb)) {
			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			    tg3_flag(tp, TSO_BUG)) {
				if (tg3_tso_bug_gso_check(tnapi, skb))
					return tg3_tso_bug(tp, tnapi, txq, skb);
				goto drop;
			}
			ip_csum = iph->check;
			ip_tot_len = iph->tot_len;
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcph = tcp_hdr(skb);
		tcp_csum = tcph->check;

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcph->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else {
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		}

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW can not correctly checksum packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (skb_checksum_help(skb))
				goto drop;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (skb_vlan_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = skb_vlan_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card. */
		tw32_tx_mbox(tnapi->prodmbox, entry);
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
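
/* Design note: the descriptor rings above use dma_alloc_coherent()
 * because both CPU and NIC access them continuously and in place,
 * while the packet buffers themselves are streaming-mapped per buffer
 * (pci_map_single() with dma_unmap_addr() bookkeeping) so ownership
 * can be handed back and forth cheaply.
 */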
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_consume_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* tp->hw_stats can be referenced safely:
	 * 1. under rtnl_lock
	 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
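
/* Summary of the RSS remapping above, for reference:
 *   vector 1 -> &sblk->idx[0].rx_producer
 *   vector 2 -> &sblk->rx_jumbo_consumer
 *   vector 3 -> &sblk->reserved
 *   vector 4 -> &sblk->rx_mini_consumer
 * i.e. status-block fields that are unused in RSS mode are reused as
 * the extra rx return ring producer indexes.
 */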
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |=
					TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}

static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}
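/* Context note (wiring assumed from elsewhere in this file): handlers like
 * tg3_set_mac_addr() reach userspace through net_device_ops, e.g.
 *
 *	static const struct net_device_ops tg3_netdev_ops = {
 *		...
 *		.ndo_set_mac_address	= tg3_set_mac_addr,
 *		...
 *	};
 *
 * so "ip link set dev ethX address ..." lands here via the core's
 * dev_set_mac_address() path, under RTNL.
 */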
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
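/* Orientation sketch (field offsets assumed from tg3.h): each ring control
 * block in NIC SRAM is a small TG3_BDINFO record that tg3_set_bdinfo()
 * fills:
 *
 *	+0x0	host ring DMA address, high 32 bits
 *	+0x4	host ring DMA address, low 32 bits
 *	+0x8	(ring length << BDINFO_FLAGS_MAXLEN_SHIFT) | flags
 *	+0xc	ring address inside NIC SRAM (pre-5705 chips only)
 */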
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
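/* Usage note (standard ethtool plumbing, assumed): these registers are
 * what the generic coalescing knobs end up programming, e.g.
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * The core fills a struct ethtool_coalesce from those options and the
 * driver's .set_coalesce hook eventually calls __tg3_set_coalesce().
 */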
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}

/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}
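/* Reviewer note (assumption, derived rather than quoted): calc_crc() is
 * the standard reflected CRC-32 over the buffer, computed bit by bit with
 * the reversed polynomial CRC32_POLY_LE from <linux/crc32poly.h>. Because
 * it inverts the register on return, it should relate to the generic
 * helper in <linux/crc32.h> as
 *
 *	calc_crc(buf, len) == ~ether_crc_le(len, buf)
 *
 * The multicast filter below re-inverts the result and keeps only 7 bits,
 * so it only needs a well-mixed 128-bucket hash, not the exact FCS value.
 */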
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
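/* Worked example of the hash mapping above (illustrative numbers): if
 * calc_crc() returns crc = 0xfffff01a, then
 *
 *	bit    = ~crc & 0x7f        = 0x65  (hash bucket 0..127)
 *	regidx = (bit & 0x60) >> 5  = 3     (which MAC_HASH_REG_n)
 *	bit   &= 0x1f               = 0x05  (which bit in that register)
 *
 * so that address sets bit 5 of MAC_HASH_REG_3. Matching frames hash to a
 * set bit and are accepted; the filter is approximate, so software may
 * still see some unwanted multicasts.
 */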
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
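/* Reference sketch (assumed from <linux/ethtool.h>): the default
 * indirection table simply round-robins table slots across the active rx
 * queues, i.e. ethtool_rxfh_indir_default() behaves like
 *
 *	static u32 example_indir_default(u32 index, u32 n_rx_rings)
 *	{
 *		return index % n_rx_rings;
 *	}
 *
 * With TG3_RSS_INDIR_TBL_SIZE slots and qcnt queues, slot i steers its
 * hash bucket to queue i % qcnt.
 */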
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}

/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one of the AMD platform, MRRS is restricted to 4000 because of
	 * south bridge limitation. As a workaround, Driver is setting MRRS
	 * to 2048 instead of default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~GRC_MISC_CFG_PRESCALAR_MASK;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
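	/* Why 65: the core clock feeding the timer is 66 MHz, and a
	 * divide-by-(N + 1) prescaler with N = 65 yields 66 MHz / 66 =
	 * 1 MHz, i.e. a 1 us timer tick. (The divide-by-N+1 behavior is an
	 * assumption consistent with the fixed 66 MHz clock noted above.)
	 */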
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10 ; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
	}
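	/* The 40 bytes written above (10 x u32) are the RSS (Toeplitz-style)
	 * hash key. The core's netdev_rss_key_fill() hands out a boot-time
	 * random key shared across devices, so flows hash identically on
	 * every port of the same host (a property of the generic helper,
	 * not anything tg3-specific).
	 */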
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_5SEC);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}

/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);

static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#else
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */
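/* Usage note (standard hwmon convention, assumed): the attributes above
 * surface under /sys/class/hwmon/hwmonN/ and report millidegrees Celsius
 * (hence the "temperature * 1000"), e.g.
 *
 *	$ cat /sys/class/hwmon/hwmon2/temp1_input
 *	58000		# 58 degrees C
 *
 * "sensors" from lm-sensors reads the same files.
 */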
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
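/* How the overflow check above works: after the unsigned add
 * "low += __val", a 32-bit wrap happened exactly when the result is
 * smaller than the addend. Worked example: low = 0xffffff00 and
 * __val = 0x200 give low = 0x100, and 0x100 < 0x200, so high absorbs
 * the carry and the 64-bit (high:low) total stays correct.
 */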
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	/* Update the APE heartbeat every 5 seconds.*/
	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	timer_setup(&tp->timer, tg3_timer, 0);
}
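/* Arithmetic check: with tagged status, timer_offset = HZ (one tick per
 * second), so timer_multiplier = 1 and the "once per second" body of
 * tg3_timer() runs on every invocation. Otherwise the timer fires every
 * HZ/10 jiffies (100 ms), timer_multiplier = 10, and that body runs on
 * every tenth invocation. asf_multiplier scales the same tick rate by
 * TG3_FW_UPDATE_FREQ_SEC for the heartbeat.
 */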
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
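/* Naming example (illustrative): on a device named "eth0" with per-ring
 * MSI-X vectors, the handlers registered above show up under names such
 * as "eth0-txrx-1" or "eth0-rx-2", matching the snprintf() formats in
 * this function; with a single vector the plain device name is used.
 */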
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
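/* Layout sketch of the blob parsed above (field names follow the
 * tg3_firmware_hdr usage in this function; the exact struct definition
 * lives elsewhere in this driver): a version word, a load address, then
 * "len" covering data plus BSS.  A file of size S with TG3_FW_HDR_LEN of
 * header must therefore advertise len >= S - TG3_FW_HDR_LEN, which is
 * exactly the sanity check performed here.
 */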
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
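/* Example: with rxq_cnt == 4 and txq_cnt == 1 on a 4-CPU box,
 * irq_cnt == max(4, 1) + 1 == 5 (one extra vector for link/misc
 * interrupts), clamped to tp->irq_max.  With a single queue the function
 * returns 1 and no extra vector is added.
 */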
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
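/* Resulting vector layout, as implied by the code above: napi[0] takes
 * msix_ent[0].vector for link/misc events only, while napi[1..rxq_cnt]
 * carry the rx (and optionally tx) rings.  If the host grants fewer
 * vectors than requested, rxq_cnt shrinks to rc - 1 so one vector always
 * stays dedicated to the link interrupt.
 */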
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				struct tg3_napi *tnapi = &tp->napi[i];

				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
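/* Bring-up order used by tg3_start(), for reference: interrupt vectors
 * are configured first so the NAPI/ring allocation in
 * tg3_alloc_consistent() knows how many contexts to create; only then are
 * IRQ handlers requested, the hardware initialized under the full lock,
 * the optional MSI self-test run, and finally the timer and interrupts
 * enabled before the tx queues are started.
 */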
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to close device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	tg3_stop(tp);

	if (pci_device_is_present(tp->pdev)) {
		tg3_power_down_prepare(tp);

		tg3_carrier_off(tp);
	}
	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
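/* Example: a hardware counter stored as high = 0x1, low = 0x23 yields
 * ((u64)0x1 << 32) | 0x23 == 0x100000023; the two 32-bit halves are
 * simply recombined into one 64-bit statistic.
 */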
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)
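/* Macro expansion example: ESTAT_ADD(rx_octets) becomes
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 * i.e. each ethtool statistic is the snapshot saved before the last reset
 * plus the live hardware counter.
 */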
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					      CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}
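/* Alignment math example for the read path above: a request with
 * offset = 1 and len = 2 gives b_offset = 1 and b_count = 3, which is
 * then clamped to len = 2; one aligned word is read at offset 0 and
 * bytes 1..2 are copied out.  Larger requests read whole 4-byte words in
 * the main loop and patch up any unaligned tail the same way.
 */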
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start = 0, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
*dev
,
12145 struct ethtool_link_ksettings
*cmd
)
12147 struct tg3
*tp
= netdev_priv(dev
);
12148 u32 supported
, advertising
;
12150 if (tg3_flag(tp
, USE_PHYLIB
)) {
12151 struct phy_device
*phydev
;
12152 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
12154 phydev
= mdiobus_get_phy(tp
->mdio_bus
, tp
->phy_addr
);
12155 phy_ethtool_ksettings_get(phydev
, cmd
);
12160 supported
= (SUPPORTED_Autoneg
);
12162 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
12163 supported
|= (SUPPORTED_1000baseT_Half
|
12164 SUPPORTED_1000baseT_Full
);
12166 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
12167 supported
|= (SUPPORTED_100baseT_Half
|
12168 SUPPORTED_100baseT_Full
|
12169 SUPPORTED_10baseT_Half
|
12170 SUPPORTED_10baseT_Full
|
12172 cmd
->base
.port
= PORT_TP
;
12174 supported
|= SUPPORTED_FIBRE
;
12175 cmd
->base
.port
= PORT_FIBRE
;
12177 ethtool_convert_legacy_u32_to_link_mode(cmd
->link_modes
.supported
,
12180 advertising
= tp
->link_config
.advertising
;
12181 if (tg3_flag(tp
, PAUSE_AUTONEG
)) {
12182 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
) {
12183 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
12184 advertising
|= ADVERTISED_Pause
;
12186 advertising
|= ADVERTISED_Pause
|
12187 ADVERTISED_Asym_Pause
;
12189 } else if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
12190 advertising
|= ADVERTISED_Asym_Pause
;
12193 ethtool_convert_legacy_u32_to_link_mode(cmd
->link_modes
.advertising
,
12196 if (netif_running(dev
) && tp
->link_up
) {
12197 cmd
->base
.speed
= tp
->link_config
.active_speed
;
12198 cmd
->base
.duplex
= tp
->link_config
.active_duplex
;
12199 ethtool_convert_legacy_u32_to_link_mode(
12200 cmd
->link_modes
.lp_advertising
,
12201 tp
->link_config
.rmt_adv
);
12203 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
12204 if (tp
->phy_flags
& TG3_PHYFLG_MDIX_STATE
)
12205 cmd
->base
.eth_tp_mdix
= ETH_TP_MDI_X
;
12207 cmd
->base
.eth_tp_mdix
= ETH_TP_MDI
;
12210 cmd
->base
.speed
= SPEED_UNKNOWN
;
12211 cmd
->base
.duplex
= DUPLEX_UNKNOWN
;
12212 cmd
->base
.eth_tp_mdix
= ETH_TP_MDI_INVALID
;
12214 cmd
->base
.phy_address
= tp
->phy_addr
;
12215 cmd
->base
.autoneg
= tp
->link_config
.autoneg
;
static int tg3_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_set(phydev, cmd);
	}

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    cmd->base.duplex != DUPLEX_FULL &&
	    cmd->base.duplex != DUPLEX_HALF)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->base.duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->base.autoneg;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (advertising |
					       ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->base.duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;
	bool reset_phy = false;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Reset PHY to avoid PHY lock up */
		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			reset_phy = true;

		err = tg3_restart_hw(tp, reset_phy);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;
	bool reset_phy = false;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

		if (!phy_validate_pause(phydev, epause))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			}
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		}

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			if (phydev->autoneg) {
				/* phy_set_asym_pause() will
				 * renegotiate the link to inform our
				 * link partner of our flow control
				 * settings, even if the flow control
				 * is forced.  Let tg3_adjust_link()
				 * do the final flow control setup.
				 */
				return 0;
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			/* Reset PHY to avoid PHY lock up */
			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
			    tg3_asic_rev(tp) == ASIC_REV_5720)
				reset_phy = true;

			err = tg3_restart_hw(tp, reset_phy);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}
static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}
static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
			const u8 hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE				0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE		0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE		0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE		0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE		0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE		0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE		0x50
#define NVRAM_SELFBOOT_HW_SIZE			0x20
#define NVRAM_SELFBOOT_DATA_SIZE		0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 24) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else
				data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
*tp
, u32 offset
, u32 len
)
13314 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13318 for (i
= 0; i
< ARRAY_SIZE(test_pattern
); i
++) {
13319 for (j
= 0; j
< len
; j
+= 4) {
13322 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
13323 tg3_read_mem(tp
, offset
+ j
, &val
);
13324 if (val
!= test_pattern
[i
])
13331 static int tg3_test_memory(struct tg3
*tp
)
13333 static struct mem_entry
{
13336 } mem_tbl_570x
[] = {
13337 { 0x00000000, 0x00b50},
13338 { 0x00002000, 0x1c000},
13339 { 0xffffffff, 0x00000}
13340 }, mem_tbl_5705
[] = {
13341 { 0x00000100, 0x0000c},
13342 { 0x00000200, 0x00008},
13343 { 0x00004000, 0x00800},
13344 { 0x00006000, 0x01000},
13345 { 0x00008000, 0x02000},
13346 { 0x00010000, 0x0e000},
13347 { 0xffffffff, 0x00000}
13348 }, mem_tbl_5755
[] = {
13349 { 0x00000200, 0x00008},
13350 { 0x00004000, 0x00800},
13351 { 0x00006000, 0x00800},
13352 { 0x00008000, 0x02000},
13353 { 0x00010000, 0x0c000},
13354 { 0xffffffff, 0x00000}
13355 }, mem_tbl_5906
[] = {
13356 { 0x00000200, 0x00008},
13357 { 0x00004000, 0x00400},
13358 { 0x00006000, 0x00400},
13359 { 0x00008000, 0x01000},
13360 { 0x00010000, 0x01000},
13361 { 0xffffffff, 0x00000}
13362 }, mem_tbl_5717
[] = {
13363 { 0x00000200, 0x00008},
13364 { 0x00010000, 0x0a000},
13365 { 0x00020000, 0x13c00},
13366 { 0xffffffff, 0x00000}
13367 }, mem_tbl_57765
[] = {
13368 { 0x00000200, 0x00008},
13369 { 0x00004000, 0x00800},
13370 { 0x00006000, 0x09800},
13371 { 0x00010000, 0x0a000},
13372 { 0xffffffff, 0x00000}
13374 struct mem_entry
*mem_tbl
;
13378 if (tg3_flag(tp
, 5717_PLUS
))
13379 mem_tbl
= mem_tbl_5717
;
13380 else if (tg3_flag(tp
, 57765_CLASS
) ||
13381 tg3_asic_rev(tp
) == ASIC_REV_5762
)
13382 mem_tbl
= mem_tbl_57765
;
13383 else if (tg3_flag(tp
, 5755_PLUS
))
13384 mem_tbl
= mem_tbl_5755
;
13385 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
13386 mem_tbl
= mem_tbl_5906
;
13387 else if (tg3_flag(tp
, 5705_PLUS
))
13388 mem_tbl
= mem_tbl_5705
;
13390 mem_tbl
= mem_tbl_570x
;
13392 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
13393 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
13401 #define TG3_TSO_MSS 500
13403 #define TG3_TSO_IP_HDR_LEN 20
13404 #define TG3_TSO_TCP_HDR_LEN 20
13405 #define TG3_TSO_TCP_OPT_LEN 12
13407 static const u8 tg3_tso_header
[] = {
13409 0x45, 0x00, 0x00, 0x00,
13410 0x00, 0x00, 0x40, 0x00,
13411 0x40, 0x06, 0x00, 0x00,
13412 0x0a, 0x00, 0x00, 0x01,
13413 0x0a, 0x00, 0x00, 0x02,
13414 0x0d, 0x00, 0xe0, 0x00,
13415 0x00, 0x00, 0x01, 0x00,
13416 0x00, 0x00, 0x02, 0x00,
13417 0x80, 0x10, 0x10, 0x00,
13418 0x14, 0x09, 0x00, 0x00,
13419 0x01, 0x01, 0x08, 0x0a,
13420 0x11, 0x11, 0x11, 0x11,
13421 0x11, 0x11, 0x11, 0x11,
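/* Decoding the canned TSO frame above, read directly from the bytes:
 * 0x45 0x00 ... is an IPv4 header (IHL 5) with DF set, TTL 0x40 and
 * protocol 0x06 (TCP); source/destination are 10.0.0.1 -> 10.0.0.2.
 * The TCP header that follows advertises a 32-byte header (20 bytes plus
 * a 12-byte options block, 0x01 0x01 0x08 0x0a ..., matching
 * TG3_TSO_TCP_OPT_LEN).  The IPv4 total-length field is left zero here
 * and patched at run time in tg3_run_loopback().
 */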
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
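/* Payload validation scheme used above: transmit bytes are filled with
 * the low 8 bits of their offset (tx_data[i] = i & 0xff), so on receive
 * the expected value at any position is just the running counter "val";
 * any corruption, truncation or reordering shows up as a byte mismatch in
 * the final comparison loop.
 */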
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);
}
static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
		tg3_flag_set(tp, TX_TSTAMP_EN);
	else
		tg3_flag_clear(tp, TX_TSTAMP_EN);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
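/* Illustrative userspace sketch (not driver code): this is roughly how an
 * application would drive the SIOCSHWTSTAMP path handled above.  The
 * interface name "eth0" is an assumption for the example.
 */
#if 0	/* example only */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_rx_tstamp(int sock)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* On return the driver writes back the config it actually applied. */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
#endif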
static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	stmpconf.flags = 0;
	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

	switch (tp->rxptpctl) {
	case 0:
		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fall through */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
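/* Illustrative userspace sketch (not driver code): the validate-then-commit
 * flow above services requests like the following, issued through the
 * standard ethtool ioctl.  "eth0" is an assumption for the example.
 */
#if 0	/* example only */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_rx_usecs(int sock, unsigned int usecs)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;

	/* Read-modify-write so unrelated parameters keep their values. */
	memset(&ec, 0, sizeof(ec));
	ec.cmd = ETHTOOL_GCOALESCE;
	if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
		return -1;

	ec.cmd = ETHTOOL_SCOALESCE;
	ec.rx_coalesce_usecs = usecs;
	return ioctl(sock, SIOCETHTOOL, &ifr);
}
#endif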
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}
static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
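/* Illustrative sketch (not driver code): the sizing loop above relies on
 * address wrap-around.  Probing offsets 0x10, 0x20, 0x40, ... eventually
 * aliases back to offset 0 on a part smaller than the probe offset, at
 * which point the magic signature reappears and the probe offset equals
 * the device size.  The same idea in isolation, with a hypothetical
 * read_word() callback:
 */
#if 0	/* example only */
static unsigned int probe_size(unsigned int (*read_word)(unsigned int off),
			       unsigned int max_size, unsigned int magic)
{
	unsigned int cursize = 0x10;

	while (cursize < max_size) {
		if (read_word(cursize) == magic)
			break;		/* wrapped around to offset 0 */
		cursize <<= 1;
	}
	return cursize;
}
#endif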
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
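/* Worked example of the swab16() above (illustrative only): if the size
 * stored at 0xf2 is 512 KB, NVRAM holds the little-endian bytes 0x00 0x02.
 * Per the comment, the raw read always leaves the value with its two bytes
 * in the wrong (non-CPU) order, i.e. (u16)(val & 0xffff) == 0x0002, and
 * swab16(0x0002) == 0x0200 == 512, giving 512 * 1024 bytes regardless of
 * CPU endianness.
 */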
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
				       AUTOSENSE_DEVID_MASK)
				 << AUTOSENSE_SIZE_IN_MB);
			return;

		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
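/* Illustrative note on the autosense size math above: the device-ID field
 * extracted from NVRAM_AUTOSENSE_STATUS is treated as log2 of the flash
 * size in megabytes, so the nested shifts compute
 * (1 << devid) << AUTOSENSE_SIZE_IN_MB, i.e. devid = 0 -> 1 MB,
 * devid = 1 -> 2 MB, and so on (assuming AUTOSENSE_SIZE_IN_MB is the
 * bytes-per-megabyte shift).
 */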
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
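/* Worked example of the merge above (illustrative, hypothetical values):
 * if the 32-bit gphy config straddles two aligned OTP words as
 *
 *	thalf_otp = 0xXXXXABCD	(config bits 31:16 in its low half)
 *	bhalf_otp = 0xEF01XXXX	(config bits 15:0 in its high half)
 *
 * then ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16) reassembles
 * the value 0xABCDEF01.
 */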
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}
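	/* Worked example of the ID packing above (illustrative, hypothetical
	 * register values): with hw_phy_id_1 = 0x0020 and hw_phy_id_2 = 0x60c0,
	 *
	 *	(0x0020 & 0xffff) << 10	= 0x00008000
	 *	(0x60c0 & 0xfc00) << 16	= 0x60000000
	 *	(0x60c0 & 0x03ff) << 0	= 0x000000c0
	 *
	 * so hw_phy_id = 0x600080c0 in the driver's own TG3_PHY_ID_* layout
	 * (not the raw (PHYSID1 << 16) | PHYSID2 form); masking with
	 * TG3_PHY_ID_MASK then strips the revision bits before comparison.
	 */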
	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;
		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
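
/* Word 0 of a valid image must carry the 0x0c000000 signature in its
 * top six bits and word 1 must have its top three bits clear;
 * tg3_read_mgmtfw_ver() below uses this check to skip over anything
 * that does not look like a firmware image.
 */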
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
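
/* Note: tg3_read_sb_ver() encodes builds 1..26 as an 'a'..'z' suffix
 * on the version string, which is why larger build numbers are
 * rejected before the suffix is appended.
 */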
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
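
/* By the end of tg3_read_fw_ver(), tp->fw_ver holds the concatenated
 * version string reported through ethtool: an optional VPD firmware
 * string, the bootcode version ("bc v..." or "sb v..."), and, when
 * present, a management firmware suffix ("NCSI"/"DASH"/"SMASH" or a
 * ", <ver>" fragment from tg3_read_mgmtfw_ver()).
 */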
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
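
/* The generation flags set above nest strictly: 57765_PLUS implies
 * 5755_PLUS, which implies 5750_PLUS, which implies 5705_PLUS.  Later
 * code can therefore test just the oldest generation it cares about.
 */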
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time so it never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
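
		/* The "flush" accessor pairs every register write with a
		 * read-back of the same register so the posted write is
		 * pushed out to the chip before execution continues; a
		 * minimal sketch of the idea (not the exact helper body):
		 *
		 *	writel(val, tp->regs + off);
		 *	readl(tp->regs + off);	<-- flushes the posted write
		 */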
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
		tp->ape_hb_interval =
			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

	if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
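
	/* 0x484b is ASCII "HK", the signature the bootcode leaves in the
	 * upper half of the mailbox word when a valid MAC address follows.
	 */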
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >>  8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >>  8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0]))
		return -EINVAL;
	return 0;
}
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
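
/* tg3_calc_dma_bndry() folds the host cache line size into the
 * DMA_RWCTRL boundary fields: BOUNDARY_SINGLE_CACHELINE keeps each
 * DMA burst within one cache line, BOUNDARY_MULTI_CACHELINE permits
 * larger bursts, and a zero goal leaves the chip defaults untouched.
 * The arch #ifdefs above encode which behavior the PCI host bridges
 * on each platform prefer.
 */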
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
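
/* tg3_test_dma() is run once while the device is being brought up;
 * whatever dma_rwctrl value survives the write/read-back pattern test
 * above is what the driver programs for all subsequent DMA activity.
 */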
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
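
/* These watermarks tune the on-chip buffer manager: roughly, the
 * low-water marks decide when receive DMA is throttled and flow
 * control kicks in, while the high-water marks cap total mbuf usage.
 * The jumbo variants take effect when the jumbo ring is enabled.
 */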
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
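
/* The defaults chosen in tg3_init_coal() are what userspace sees and
 * tunes through the standard ethtool coalescing interface, e.g.
 * (illustrative only, "eth0" is a placeholder interface name):
 *
 *	ethtool -c eth0		# read back, served from tp->coal
 *	ethtool -C eth0 rx-usecs 50 rx-frames 25
 *
 * The per-interrupt ("_irq") and statistics values are zeroed on
 * 5705+ chips, presumably because those chips do not implement the
 * corresponding host-coalescing knobs.
 */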
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}
	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif

	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}
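
	/* On the APE-capable chips listed above, the Application
	 * Processing Engine (used, e.g., by management/NC-SI firmware)
	 * exposes its registers through BAR 2; the driver and the APE
	 * firmware hand the device back and forth through that window
	 * across resets and power transitions.
	 */
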
	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
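
	/* To summarize the ladder above: 5788 parts are limited to
	 * 32-bit DMA; parts with the 40-bit DMA bug get a 40-bit
	 * persistent mask (and a 64-bit streaming mask on HIGHMEM
	 * kernels, since tg3_start_xmit() re-checks addresses); all
	 * other parts get full 64-bit masks.  If a wider mask cannot
	 * be set, the driver falls back to 32-bit DMA rather than
	 * failing the probe.
	 */
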
	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
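
	/* These offloads remain user-visible knobs: the bits collected
	 * in "features" are also added to dev->hw_features below, so a
	 * default that is off (for example firmware TSO on older chips,
	 * per the comment above) can still be toggled from userspace,
	 * e.g. "ethtool -K eth0 tso on" ("eth0" is a placeholder).
	 */
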
	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
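
	/* Rough mailbox layout implied by the arithmetic above, for
	 * reference only: interrupt mailboxes for the first five
	 * vectors sit 8 bytes apart and later ones 4 bytes apart,
	 * receive-return mailboxes advance by 8 per vector, and the
	 * send mailboxes alternate -0x4/+0xc, walking the low and high
	 * halves of consecutive 8-byte mailbox slots.
	 */
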
	/*
	 * Reset chip in case UNDI or EFI driver did not shut down DMA.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);

		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}
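
/* The error labels above unwind tg3_init_one() in strict reverse order
 * of acquisition: APE mapping, register mapping, netdev allocation, PCI
 * regions, then PCI enable.  Each goto earlier in the function targets
 * the first label whose resource was actually acquired, so nothing is
 * released twice.
 */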
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
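
/* tg3_remove_one() mirrors the probe path: unregister_netdev() runs
 * first so no new I/O can be started, then the APE and device register
 * windows are unmapped, the netdev is freed, and finally the PCI
 * regions and device are released.
 */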
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}
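
/* If tg3_power_down_prepare() fails, the suspend path above backs out:
 * it restarts the hardware and reattaches the interface so the device
 * stays usable, but still returns the error so the PM core aborts the
 * system suspend.
 */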
static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}
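
/* On a plain reboot the interface is only detached and closed;
 * tg3_power_down(), which places the chip in its low-power state
 * (arming Wake-on-LAN where configured), runs only when the machine
 * is actually powering off.
 */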
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
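
/* Together these three callbacks implement the standard PCI AER
 * recovery handshake: error_detected() quiesces the device and asks
 * for a slot reset, slot_reset() re-enables the device and restores
 * its config space, and resume() brings the data path back up once
 * the core reports that recovery succeeded.
 */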
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
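
/* module_pci_driver() expands to the usual module_init()/module_exit()
 * pair that registers and unregisters tg3_driver.  In practice the
 * module is loaded automatically on a PCI ID match, or manually with
 * "modprobe tg3"; for a bound interface, "ethtool -i <iface>" reports
 * DRV_MODULE_NAME and DRV_MODULE_VERSION via the driver's drvinfo hook.
 */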