/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
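/* Usage illustration (not driver logic): call sites name flags without the
 * TG3_FLAG_ prefix and the ## paste rebuilds the enumerator, e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))		   // tests TG3_FLAG_ENABLE_APE
 *		tg3_flag_set(tp, MDIOBUS_INITED);  // sets TG3_FLAG_MDIOBUS_INITED
 *
 * A misspelled flag name fails to compile because TG3_FLAG_##flag must be a
 * valid enum TG3_FLAGS value accepted by the typed helpers above.
 */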
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
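/* Worked example of the shift-and-mask point above (illustration only):
 * because TG3_TX_RING_SIZE is a power of two known to the compiler,
 *
 *	NEXT_TX(511) == (512 & 511) == 0
 *
 * so the producer index wraps to slot 0 with a single AND instead of a
 * '% TG3_TX_RING_SIZE' division.
 */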
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
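/* Sketch of how the threshold is consumed on the receive path (illustration
 * only; the real logic lives in the rx handler): small frames are copied so
 * the mapped ring buffer can be recycled in place, larger ones are handed up
 * without the extra copy, roughly
 *
 *	if (len > TG3_RX_COPY_THRESH(tp))
 *		// unmap the ring buffer and pass it to the stack
 *	else
 *		// memcpy() len bytes into a small skb, keep the buffer queued
 */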
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);		/* read back to flush the posted write */
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
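/* Informal cheat sheet for the accessors above: tw32()/tr32() dispatch
 * through the function pointers chosen at probe time (direct MMIO or the
 * indirect config-space path), tw32_f() reads the register back to flush a
 * posted write, and tw32_wait_f() additionally enforces a settle delay, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);	// write, flush, 40us
 */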
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* else: fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* else: fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if hb interval has exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}
static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
1631 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1635 val
= tr32(GRC_RX_CPU_EVENT
);
1636 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1637 tw32_f(GRC_RX_CPU_EVENT
, val
);
1639 tp
->last_event_jiffies
= jiffies
;
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1674 /* tp->lock is held. */
1675 static void tg3_phy_gather_ump_data(struct tg3
*tp
, u32
*data
)
1680 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1682 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1683 val
|= (reg
& 0xffff);
1687 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1689 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1690 val
|= (reg
& 0xffff);
1694 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1695 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1697 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1698 val
|= (reg
& 0xffff);
1702 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1709 /* tp->lock is held. */
1710 static void tg3_ump_link_report(struct tg3
*tp
)
1714 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1717 tg3_phy_gather_ump_data(tp
, data
);
1719 tg3_wait_for_event_ack(tp
);
1721 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1722 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1723 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x0, data
[0]);
1724 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x4, data
[1]);
1725 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x8, data
[2]);
1726 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0xc, data
[3]);
1728 tg3_generate_fw_event(tp
);
1731 /* tp->lock is held. */
1732 static void tg3_stop_fw(struct tg3
*tp
)
1734 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
1735 /* Wait for RX cpu to ACK the previous event. */
1736 tg3_wait_for_event_ack(tp
);
1738 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
1740 tg3_generate_fw_event(tp
);
1742 /* Wait for RX cpu to ACK this event. */
1743 tg3_wait_for_event_ack(tp
);
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
1750 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
1751 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
1753 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1755 case RESET_KIND_INIT
:
1756 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1760 case RESET_KIND_SHUTDOWN
:
1761 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1765 case RESET_KIND_SUSPEND
:
1766 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1776 /* tp->lock is held. */
1777 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
1779 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1781 case RESET_KIND_INIT
:
1782 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1783 DRV_STATE_START_DONE
);
1786 case RESET_KIND_SHUTDOWN
:
1787 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1788 DRV_STATE_UNLOAD_DONE
);
1797 /* tp->lock is held. */
1798 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
1800 if (tg3_flag(tp
, ENABLE_ASF
)) {
1802 case RESET_KIND_INIT
:
1803 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1807 case RESET_KIND_SHUTDOWN
:
1808 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1812 case RESET_KIND_SUSPEND
:
1813 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	phy_attached_info(phydev);

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}
static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}

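/* Note: the LPI timer occupies only the low 16 bits of
 * TG3_CPMU_EEE_DBTMR1, which is why it is masked with 0xffff when pulled
 * here and written back the same way in tg3_setup_eee() below.
 */
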
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return 0;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))

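/* Each PCI function owns one 4-bit nibble of the GPIO message word (see
 * the "4 * tp->pci_fn" shift in tg3_set_function_status() below), so for
 * example function 2's "driver present" flag is
 * TG3_GPIO_MSG_DRVR_PRES << 8.  The ALL_* masks above simply gather the
 * same flag across all four function nibbles.
 */
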
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}

static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static bool tg3_phy_led_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

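/* Callers OR the command bits together; a single-word read, for example,
 * issues NVRAM_CMD_RD | NVRAM_CMD_GO | NVRAM_CMD_FIRST | NVRAM_CMD_LAST |
 * NVRAM_CMD_DONE (see tg3_nvram_read() below).
 */
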
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

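/* Worked example of the Atmel translation above, assuming the usual
 * 264-byte AT45DB0X1B page size with ATMEL_AT45DB0X1B_PAGE_POS == 9
 * (both from tg3.h): linear address 1000 sits in page 3 at offset 208,
 * so the physical address is (3 << 9) + 208 = 0x6d0, and
 * tg3_nvram_logical_addr() maps 0x6d0 back to 3 * 264 + 208 = 1000.
 */
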
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);

	if (!res)
		*val = cpu_to_be32(v);

	return res;
}

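/* Example of the behaviour described in the NOTE above: if NVRAM holds
 * the bytes 12 34 56 78, tg3_nvram_read() yields the u32 0x12345678 on
 * either host, which in memory is the bytes 12 34 56 78 on a big-endian
 * machine but 78 56 34 12 on a little-endian one.  The cpu_to_be32()
 * here makes the in-memory bytes match the NVRAM stream on both.
 */
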
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}

static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments. Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length. Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}

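/* Layout sketch of the two formats described above:
 *
 *   non-fragmented:  [ hdr | data ... ]
 *                    data words = (tp->fw->size - TG3_FW_HDR_LEN) / 4
 *
 *   fragmented:      [ main hdr (len == 0xffffffff) |
 *                      frag hdr | frag data | frag hdr | frag data | ... ]
 *                    per-fragment data words = (hdr len - TG3_FW_HDR_LEN) / 4
 */
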
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}

/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}

static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}

/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}

/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}

/* tp->lock is held. */
static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
{
	u32 addr_high, addr_low;

	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
		    (mac_addr[4] <<  8) | mac_addr[5]);

	if (index < 4) {
		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
	} else {
		index -= 4;
		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
	}
}

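/* For example, the address 00:10:18:aa:bb:cc is split as
 * addr_high = 0x0010 and addr_low = 0x18aabbcc before being written to
 * the MAC_ADDR_<n> (or MAC_EXTADDR_<n>) register pair for that index.
 */
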
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high;
	int i;

	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 4; i < 16; i++)
			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

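/* The TX backoff seed is just the byte-wise sum of the station address
 * masked with TX_BACKOFF_SEED_MASK (0x3ff in tg3.h); for 00:10:18:aa:bb:cc
 * the sum 0x00+0x10+0x18+0xaa+0xbb+0xcc = 0x259 fits the mask unchanged.
 */
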
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}

static int tg3_setup_phy(struct tg3 *, bool);

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, false);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

	return 0;
}

static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}

static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}

static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}

static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
	struct ethtool_eee eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return true;

	tg3_eee_pull_config(tp, &eee);

	if (tp->eee.eee_enabled) {
		if (tp->eee.advertised != eee.advertised ||
		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
			return false;
	} else {
		/* EEE is disabled but we're advertising */
		if (eee.advertised)
			return false;
	}

	return true;
}

static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}

static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			netif_carrier_on(tp->dev);
		} else {
			netif_carrier_off(tp->dev);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u32 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset.  If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
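
/* Commentary (added, not in the original source): the state machine below
 * is a software implementation of IEEE 802.3 Clause 37 style 1000BASE-X
 * autonegotiation.  The ANEG_CFG_* bits above mirror the on-wire config
 * word (FD/HD duplex abilities, PS1/PS2 pause bits, RF1/RF2 remote fault,
 * ACK and next-page), while the MR_* flags hold the decoded view of the
 * link partner's advertised abilities.
 */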
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fall through */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fall through */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fall through */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u16 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	    !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	    (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}
/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
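
/* Commentary (added, not in the original source): STOP-ing the reference
 * clock before the two 32-bit register writes keeps the counter from
 * ticking between the LSB and MSB updates, so the 64-bit value is loaded
 * consistently; the final flushed write with RESUME restarts the counter
 * from the new value in one step.
 */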
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
	}

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}
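
/* Usage note (added, not in the original source): userspace reads this
 * capability report through the ETHTOOL_GET_TS_INFO interface, e.g.
 * "ethtool -T ethX", which prints the SOF_TIMESTAMPING_* bits, the PHC
 * index, and the rx filter set advertised above.
 */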
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
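
/* Worked example (added, not in the original source): a requested offset of
 * 1000 ppm (ppb = 1,000,000) gives
 *
 *	correction = 1000000 * (1 << 24) / 1000000000 ~= 16777
 *
 * so the 24-bit accumulator (2^24 = 16777216) wraps roughly once every
 * 1000 device clocks, adding (or, with the NEG bit, removing) one extra
 * count per ~1000 clocks, i.e. a 1000 ppm frequency adjustment.
 */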
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
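
/* Design note (added, not in the original source): clock steps are
 * accumulated in the software offset tp->ptp_adjust and applied in
 * tg3_ptp_gettime() rather than rewritten into the hardware counter,
 * which avoids a stop/load/resume cycle of the free-running reference
 * clock on every adjustment.
 */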
static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	*ts = ns_to_timespec64(ns);

	return 0;
}
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec64_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime64	= tg3_ptp_gettime,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}
/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}
static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
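
/* Worked example (added, not in the original source): with a 512-entry ring
 * (TG3_TX_RING_SIZE = 512), tx_prod = 5 after wrapping and tx_cons = 510
 * gives (5 - 510) & 511 = 7 descriptors in flight, so the function returns
 * tx_pending - 7; the power-of-two mask makes the unsigned subtraction
 * wrap correctly across the ring boundary.
 */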
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_consume_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
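
/* Commentary (added, not in the original source): the smp_mb() above is the
 * consumer half of a stop/wake handshake.  The producer side in
 * tg3_start_xmit() stops the queue and then re-checks tg3_tx_avail(); the
 * barrier here publishes the updated tx_cons before this side tests
 * netif_tx_queue_stopped(), so at least one of the two paths observes the
 * other's update and the queue cannot remain stopped forever.
 */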
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
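/* Note: under RSS, each rx return vector refills buffers into its own
 * per-vector prodring shadow; tg3_rx_prodring_xfer() above drains those
 * shadows into tp->napi[0].prodring, the only producer ring the hardware
 * actually reads.  The -ENOSPC path simply leaves the remainder behind
 * for a later pass once destination slots free up.
 */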
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
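/* tg3_poll_work() charges tx completion nothing against the NAPI budget;
 * only tg3_rx() consumes it, and it is handed the remaining
 * "budget - work_done" so the per-vector total never exceeds the budget
 * NAPI granted.
 */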
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
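/* Typical usage (a sketch, not a quote from any caller): paths that
 * reconfigure the chip do
 *
 *	tg3_full_lock(tp, 1);	/\* quiesce the irq handlers too *\/
 *	...reprogram the hardware...
 *	tg3_full_unlock(tp);
 *
 * while paths that only need mutual exclusion against the timer and
 * reset logic pass irq_sync == 0.
 */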
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}
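/* Worked example: base = 0xfffffff0, len = 0x100.  In 32-bit arithmetic
 * 0xfffffff0 + 0x100 + 8 wraps to 0xf8, which is < base, so the buffer is
 * flagged as straddling a 4GB boundary.  The + 8 keeps a small guard
 * margin before the boundary (the chip's exact requirement is not spelled
 * out here).
 */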
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
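/* Example: a mapping at 1ULL << 40 (0x100_0000_0000) fails this test for
 * any len, since it already exceeds DMA_BIT_MASK(40) == 0xff_ffff_ffff;
 * the caller then falls back to tigon3_dma_hwbug_workaround() below.
 */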
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
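/* Assuming TXD_LEN_SHIFT is 16 (as defined in tg3.h), a 1514-byte final
 * fragment with TXD_FLAG_END set is encoded as
 * len_flags = (1514 << 16) | TXD_FLAG_END, i.e. the length lives in the
 * upper halfword and the flag bits in the lower.
 */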
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
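/* Worked example of the dma_limit split above, assuming tp->dma_limit is
 * 4096: a 4100-byte fragment would normally leave a 4-byte tail, which
 * trips the 8-byte DMA bug, so the loop instead emits a 2048-byte BD and
 * carries 2052 bytes (4 + 4096/2) into the final BD, keeping every BD
 * both under the limit and longer than 8 bytes.
 */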
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
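/* The GSO fallback above resegments the skb in software, with TSO masked
 * out of the advertised feature set, then feeds each resulting segment
 * back through tg3_start_xmit(), where the normal non-TSO mapping rules
 * apply.
 */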
/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u32 tcp_opt_len, hdr_len;

		if (skb_cow_head(skb, 0))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		/* HW/FW can not correctly segment packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (tg3_tso_bug_gso_check(tnapi, skb))
				return tg3_tso_bug(tp, tnapi, txq, skb);
			goto drop;
		}

		if (!skb_is_gso_v6(skb)) {
			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			    tg3_flag(tp, TSO_BUG)) {
				if (tg3_tso_bug_gso_check(tnapi, skb))
					return tg3_tso_bug(tp, tnapi, txq, skb);
				goto drop;
			}
			ip_csum = iph->check;
			ip_tot_len = iph->tot_len;
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcph = tcp_hdr(skb);
		tcp_csum = tcph->check;

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcph->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else {
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		}

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW can not correctly checksum packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (skb_checksum_help(skb))
				goto drop;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (skb_vlan_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = skb_vlan_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card. */
		tw32_tx_mbox(tnapi->prodmbox, entry);
		mmiowb();
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
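/* Allocation failures above degrade gracefully: if only i of the
 * requested buffers could be posted, the ring simply runs with i entries
 * (and a netdev_warn() is logged) rather than failing bring-up, unless
 * not even the first buffer could be allocated.
 */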
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_consume_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
						    TG3_RX_RCB_RING_BYTES(tp),
						    &tnapi->rx_rcb_mapping,
						    GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* tp->hw_stats can be referenced safely:
	 *     1. under rtnl_lock
	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
					   sizeof(struct tg3_hw_stats),
					   &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
						       TG3_HW_STATUS_SIZE,
						       &tnapi->status_mapping,
						       GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}
9060 static int tg3_chip_reset(struct tg3
*tp
)
9061 __releases(tp
->lock
)
9062 __acquires(tp
->lock
)
9065 void (*write_op
)(struct tg3
*, u32
, u32
);
9068 if (!pci_device_is_present(tp
->pdev
))
9073 tg3_ape_lock(tp
, TG3_APE_LOCK_GRC
);
9075 /* No matching tg3_nvram_unlock() after this because
9076 * chip reset below will undo the nvram lock.
9078 tp
->nvram_lock_cnt
= 0;
9080 /* GRC_MISC_CFG core clock reset will clear the memory
9081 * enable bit in PCI register 4 and the MSI enable bit
9082 * on some chips, so we save relevant registers here.
9084 tg3_save_pci_state(tp
);
9086 if (tg3_asic_rev(tp
) == ASIC_REV_5752
||
9087 tg3_flag(tp
, 5755_PLUS
))
9088 tw32(GRC_FASTBOOT_PC
, 0);
9091 * We must avoid the readl() that normally takes place.
9092 * It locks machines, causes machine checks, and other
9093 * fun things. So, temporarily disable the 5701
9094 * hardware workaround, while we do the reset.
9096 write_op
= tp
->write32
;
9097 if (write_op
== tg3_write_flush_reg32
)
9098 tp
->write32
= tg3_write32
;
9100 /* Prevent the irq handler from reading or writing PCI registers
9101 * during chip reset when the memory enable bit in the PCI command
9102 * register may be cleared. The chip does not generate interrupt
9103 * at this time, but the irq handler may still be called due to irq
9104 * sharing or irqpoll.
9106 tg3_flag_set(tp
, CHIP_RESETTING
);
9107 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
9108 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
9109 if (tnapi
->hw_status
) {
9110 tnapi
->hw_status
->status
= 0;
9111 tnapi
->hw_status
->status_tag
= 0;
9113 tnapi
->last_tag
= 0;
9114 tnapi
->last_irq_tag
= 0;
9118 tg3_full_unlock(tp
);
9120 for (i
= 0; i
< tp
->irq_cnt
; i
++)
9121 synchronize_irq(tp
->napi
[i
].irq_vec
);
9123 tg3_full_lock(tp
, 0);
9125 if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
9126 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
9127 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
9131 val
= GRC_MISC_CFG_CORECLK_RESET
;
9133 if (tg3_flag(tp
, PCI_EXPRESS
)) {
9134 /* Force PCIe 1.0a mode */
9135 if (tg3_asic_rev(tp
) != ASIC_REV_5785
&&
9136 !tg3_flag(tp
, 57765_PLUS
) &&
9137 tr32(TG3_PCIE_PHY_TSTCTL
) ==
9138 (TG3_PCIE_PHY_TSTCTL_PCIE10
| TG3_PCIE_PHY_TSTCTL_PSCRAM
))
9139 tw32(TG3_PCIE_PHY_TSTCTL
, TG3_PCIE_PHY_TSTCTL_PSCRAM
);
9141 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
) {
9142 tw32(GRC_MISC_CFG
, (1 << 29));
9147 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
9148 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
9149 tw32(GRC_VCPU_EXT_CTRL
,
9150 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
9153 /* Set the clock to the highest frequency to avoid timeouts. With link
9154 * aware mode, the clock speed could be slow and bootcode does not
9155 * complete within the expected time. Override the clock to allow the
9156 * bootcode to finish sooner and then restore it.
9158 tg3_override_clk(tp
);
9160 /* Manage gphy power for all CPMU absent PCIe devices. */
9161 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, CPMU_PRESENT
))
9162 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
9164 tw32(GRC_MISC_CFG
, val
);
9166 /* restore 5701 hardware bug workaround write method */
9167 tp
->write32
= write_op
;
9169 /* Unfortunately, we have to delay before the PCI read back.
9170 * Some 575X chips even will not respond to a PCI cfg access
9171 * when the reset command is given to the chip.
9173 * How do these hardware designers expect things to work
9174 * properly if the PCI write is posted for a long period
9175 * of time? It is always necessary to have some method by
9176 * which a register read back can occur to push the write
9177 * out which does the reset.
9179 * For most tg3 variants the trick below was working.
9184 /* Flush PCI posted writes. The normal MMIO registers
9185 * are inaccessible at this time so this is the only
9186 * way to make this reliably (actually, this is no longer
9187 * the case, see above). I tried to use indirect
9188 * register read/write but this upset some 5701 variants.
9190 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
9194 if (tg3_flag(tp
, PCI_EXPRESS
) && pci_is_pcie(tp
->pdev
)) {
9197 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A0
) {
9201 /* Wait for link training to complete. */
9202 for (j
= 0; j
< 5000; j
++)
9205 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
9206 pci_write_config_dword(tp
->pdev
, 0xc4,
9207 cfg_val
| (1 << 15));
9210 /* Clear the "no snoop" and "relaxed ordering" bits. */
9211 val16
= PCI_EXP_DEVCTL_RELAX_EN
| PCI_EXP_DEVCTL_NOSNOOP_EN
;
9213 * Older PCIe devices only support the 128 byte
9214 * MPS setting. Enforce the restriction.
9216 if (!tg3_flag(tp
, CPMU_PRESENT
))
9217 val16
|= PCI_EXP_DEVCTL_PAYLOAD
;
9218 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_DEVCTL
, val16
);
9220 /* Clear error status */
9221 pcie_capability_write_word(tp
->pdev
, PCI_EXP_DEVSTA
,
9222 PCI_EXP_DEVSTA_CED
|
9223 PCI_EXP_DEVSTA_NFED
|
9224 PCI_EXP_DEVSTA_FED
|
9225 PCI_EXP_DEVSTA_URD
);
9228 tg3_restore_pci_state(tp
);
9230 tg3_flag_clear(tp
, CHIP_RESETTING
);
9231 tg3_flag_clear(tp
, ERROR_PROCESSED
);
9234 if (tg3_flag(tp
, 5780_CLASS
))
9235 val
= tr32(MEMARB_MODE
);
9236 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
9238 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A3
) {
9240 tw32(0x5000, 0x400);
9243 if (tg3_flag(tp
, IS_SSB_CORE
)) {
9245 * BCM4785: In order to avoid repercussions from using
9246 * potentially defective internal ROM, stop the Rx RISC CPU,
9247 * which is not required.
9250 tg3_halt_cpu(tp
, RX_CPU_BASE
);
9253 err
= tg3_poll_fw(tp
);
9257 tw32(GRC_MODE
, tp
->grc_mode
);
9259 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A0
) {
9262 tw32(0xc4, val
| (1 << 15));
9265 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
9266 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
9267 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
9268 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A0
)
9269 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
9270 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
9273 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9274 tp
->mac_mode
= MAC_MODE_PORT_MODE_TBI
;
9276 } else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
9277 tp
->mac_mode
= MAC_MODE_PORT_MODE_GMII
;
9282 tw32_f(MAC_MODE
, val
);
9285 tg3_ape_unlock(tp
, TG3_APE_LOCK_GRC
);
9289 if (tg3_flag(tp
, PCI_EXPRESS
) &&
9290 tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
&&
9291 tg3_asic_rev(tp
) != ASIC_REV_5785
&&
9292 !tg3_flag(tp
, 57765_PLUS
)) {
9295 tw32(0x7c00, val
| (1 << 25));
9298 tg3_restore_clk(tp
);
9300 /* Increase the core clock speed to fix tx timeout issue for 5762
9301 * with 100Mbps link speed.
9303 if (tg3_asic_rev(tp
) == ASIC_REV_5762
) {
9304 val
= tr32(TG3_CPMU_CLCK_ORIDE_ENABLE
);
9305 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE
, val
|
9306 TG3_CPMU_MAC_ORIDE_ENABLE
);
9309 /* Reprobe ASF enable state. */
9310 tg3_flag_clear(tp
, ENABLE_ASF
);
9311 tp
->phy_flags
&= ~(TG3_PHYFLG_1G_ON_VAUX_OK
|
9312 TG3_PHYFLG_KEEP_LINK_ON_PWRDN
);
9314 tg3_flag_clear(tp
, ASF_NEW_HANDSHAKE
);
9315 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
9316 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
9319 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
9320 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
9321 tg3_flag_set(tp
, ENABLE_ASF
);
9322 tp
->last_event_jiffies
= jiffies
;
9323 if (tg3_flag(tp
, 5750_PLUS
))
9324 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
9326 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &nic_cfg
);
9327 if (nic_cfg
& NIC_SRAM_1G_ON_VAUX_OK
)
9328 tp
->phy_flags
|= TG3_PHYFLG_1G_ON_VAUX_OK
;
9329 if (nic_cfg
& NIC_SRAM_LNK_FLAP_AVOID
)
9330 tp
->phy_flags
|= TG3_PHYFLG_KEEP_LINK_ON_PWRDN
;
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
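/* Illustrative note (not driver code): tg3_set_bdinfo() splits the 64-bit
 * bus address of a ring into two 32-bit SRAM words.  For example, a ring
 * mapped at DMA address 0x0000000123456000 (made-up value) is written as
 *
 *	HOST_ADDR + TG3_64BIT_REG_HIGH = 0x00000001
 *	HOST_ADDR + TG3_64BIT_REG_LOW  = 0x23456000
 *
 * which follows directly from the (u64) mapping >> 32 and & 0xffffffff
 * expressions above; the chip reassembles the two words into the full
 * bus address.
 */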
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
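/* Layout note, evident from the address arithmetic above: the per-vector
 * host-coalescing registers are laid out with a 0x18-byte stride, so
 * HOSTCC_*_VEC1 + i * 0x18 addresses the copy belonging to vector i + 1.
 * E.g. with i == 2, HOSTCC_RXCOL_TICKS_VEC1 + 0x30 is the RX ticks
 * register of the third per-vector block.
 */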
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
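/* Note on the mailbox protocol above: writing the nonzero value 1 to a
 * vector's interrupt mailbox keeps that vector's interrupts masked; the
 * interrupt handler later re-arms the vector by writing the mailbox back
 * (0, or last_tag << 24 when tagged status is in use).  The _f variants
 * additionally flush the posted write, typically via a register read-back.
 */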
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
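/* Worked example of the threshold math above (values illustrative): with
 * rx_pending == 200 and rx_std_max_post == 32,
 *
 *	nic_rep_thresh    = min(bdcache_maxcnt / 2, 32)
 *	host_rep_thresh   = max(200 / 8, 1) = 25
 *	RCVBDI_STD_THRESH = min(nic_rep_thresh, host_rep_thresh)
 *
 * i.e. the chip signals for replenishment after consuming the smaller of
 * the NIC-side and host-side buffer-count thresholds.
 */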
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}
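/* calc_crc() above is the standard bit-reflected (little-endian) CRC-32
 * used on Ethernet: polynomial CRC32_POLY_LE, initial value 0xffffffff,
 * bytes consumed LSB first, with a final inversion.  A minimal usage
 * sketch, matching what __tg3_set_rx_mode() below does per address:
 *
 *	u32 crc = calc_crc(ha->addr, ETH_ALEN);
 */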
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
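/* Worked example of the multicast hash above (CRC value illustrative):
 * if calc_crc() returns 0x1234569f for some address, then
 *
 *	bit    = ~0x1234569f & 0x7f = 0x60	(hash bucket, 0..127)
 *	regidx = (0x60 & 0x60) >> 5 = 3		(MAC_HASH_REG_3)
 *	bit   &= 0x1f			  	(bit 0 of that register)
 *
 * so each bucket selects one bit out of the four 32-bit hash registers.
 * The filter is imperfect; colliding non-subscribed frames are dropped
 * higher up the stack.
 */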
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
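/* The loop above packs eight 4-bit queue indices into each 32-bit
 * register, first entry in the most significant nibble.  E.g. a table
 * slice of {1, 0, 3, 2, 1, 0, 3, 2} is written as 0x10321032 (sketch;
 * actual values depend on the configured indirection table).
 */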
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one of the AMD platform, MRRS is restricted to 4000 because of
	 * south bridge limitation. As a workaround, Driver is setting MRRS
	 * to 2048 instead of default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
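	/* Arithmetic note: with the fixed 66 MHz core clock, a prescaler
	 * value of 65 presumably divides by (65 + 1), giving the timer a
	 * tick of roughly 66 MHz / 66 ~= 1 MHz, i.e. about 1 usec per count.
	 */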
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10 ; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
		/* fall through */
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
		/* fall through */
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
		/* fall through */
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
		/* fall through */
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
		/* fall through */
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
		/* fall through */
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
		/* fall through */
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
		/* fall through */
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
		/* fall through */
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
		/* fall through */
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
		/* fall through */
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
		/* fall through */
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_5SEC);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
		off += len;

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature * 1000);
}
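/* The APE scratchpad reports the temperature in whole degrees Celsius;
 * multiplying by 1000 above converts it to the millidegree units the
 * hwmon sysfs ABI expects for temp*_input, temp*_crit and temp*_max.
 */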
static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#else
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
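/* The macro above folds a 32-bit hardware readout into a 64-bit software
 * counter: each readout is treated as a delta added to ->low, and a wrap
 * of the 32-bit sum (low < __val) carries one into ->high.  Equivalent
 * arithmetic, as a sketch:
 *
 *	u64 total = ((u64)high << 32) | low;
 *	total += __val;		   then split back into high/low words
 */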
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up || tg3_flag(tp, RESET_TASK_PENDING))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	/* Update the APE heartbeat every 5 seconds.*/
	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	timer_setup(&tp->timer, tg3_timer, 0);
}
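/* Example of the timer arithmetic above: with timer_offset = HZ / 10 the
 * timer fires every 100 ms, timer_multiplier = 10 makes the once-per-second
 * block in tg3_timer() run on every 10th tick, and asf_multiplier stretches
 * that further to TG3_FW_UPDATE_FREQ_SEC seconds for the ASF heartbeat.
 */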
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
	rtnl_unlock();
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
11272 static int tg3_test_interrupt(struct tg3
*tp
)
11274 struct tg3_napi
*tnapi
= &tp
->napi
[0];
11275 struct net_device
*dev
= tp
->dev
;
11276 int err
, i
, intr_ok
= 0;
11279 if (!netif_running(dev
))
11282 tg3_disable_ints(tp
);
11284 free_irq(tnapi
->irq_vec
, tnapi
);
11287 * Turn off MSI one shot mode. Otherwise this test has no
11288 * observable way to know whether the interrupt was delivered.
11290 if (tg3_flag(tp
, 57765_PLUS
)) {
11291 val
= tr32(MSGINT_MODE
) | MSGINT_MODE_ONE_SHOT_DISABLE
;
11292 tw32(MSGINT_MODE
, val
);
11295 err
= request_irq(tnapi
->irq_vec
, tg3_test_isr
,
11296 IRQF_SHARED
, dev
->name
, tnapi
);
11300 tnapi
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
11301 tg3_enable_ints(tp
);
11303 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
11306 for (i
= 0; i
< 5; i
++) {
11307 u32 int_mbox
, misc_host_ctrl
;
11309 int_mbox
= tr32_mailbox(tnapi
->int_mbox
);
11310 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
11312 if ((int_mbox
!= 0) ||
11313 (misc_host_ctrl
& MISC_HOST_CTRL_MASK_PCI_INT
)) {
11318 if (tg3_flag(tp
, 57765_PLUS
) &&
11319 tnapi
->hw_status
->status_tag
!= tnapi
->last_tag
)
11320 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
11325 tg3_disable_ints(tp
);
11327 free_irq(tnapi
->irq_vec
, tnapi
);
11329 err
= tg3_request_irq(tp
, 0);
11335 /* Reenable MSI one shot mode. */
11336 if (tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, 1SHOT_MSI
)) {
11337 val
= tr32(MSGINT_MODE
) & ~MSGINT_MODE_ONE_SHOT_DISABLE
;
11338 tw32(MSGINT_MODE
, val
);
11346 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11347 * successfully restored
11349 static int tg3_test_msi(struct tg3
*tp
)
11354 if (!tg3_flag(tp
, USING_MSI
))
11357 /* Turn off SERR reporting in case MSI terminates with Master
11360 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
11361 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
11362 pci_cmd
& ~PCI_COMMAND_SERR
);
11364 err
= tg3_test_interrupt(tp
);
11366 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
11371 /* other failures */
11375 /* MSI test failed, go back to INTx mode */
11376 netdev_warn(tp
->dev
, "No interrupt was generated using MSI. Switching "
11377 "to INTx mode. Please report this failure to the PCI "
11378 "maintainer and include system chipset information\n");
11380 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
11382 pci_disable_msi(tp
->pdev
);
11384 tg3_flag_clear(tp
, USING_MSI
);
11385 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
11387 err
= tg3_request_irq(tp
, 0);
11391 /* Need to reset the chip because the MSI cycle may have terminated
11392 * with Master Abort.
11394 tg3_full_lock(tp
, 1);
11396 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11397 err
= tg3_init_hw(tp
, true);
11399 tg3_full_unlock(tp
);
11402 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
11407 static int tg3_request_firmware(struct tg3
*tp
)
11409 const struct tg3_firmware_hdr
*fw_hdr
;
11411 if (request_firmware(&tp
->fw
, tp
->fw_needed
, &tp
->pdev
->dev
)) {
11412 netdev_err(tp
->dev
, "Failed to load firmware \"%s\"\n",
11417 fw_hdr
= (struct tg3_firmware_hdr
*)tp
->fw
->data
;
11419 /* Firmware blob starts with version numbers, followed by
11420 * start address and _full_ length including BSS sections
11421 * (which must be longer than the actual data, of course
11424 tp
->fw_len
= be32_to_cpu(fw_hdr
->len
); /* includes bss */
11425 if (tp
->fw_len
< (tp
->fw
->size
- TG3_FW_HDR_LEN
)) {
11426 netdev_err(tp
->dev
, "bogus length %d in \"%s\"\n",
11427 tp
->fw_len
, tp
->fw_needed
);
11428 release_firmware(tp
->fw
);
11433 /* We no longer need firmware; we have it. */
11434 tp
->fw_needed
= NULL
;
11438 static u32
tg3_irq_count(struct tg3
*tp
)
11440 u32 irq_cnt
= max(tp
->rxq_cnt
, tp
->txq_cnt
);
11443 /* We want as many rx rings enabled as there are cpus.
11444 * In multiqueue MSI-X mode, the first MSI-X vector
11445 * only deals with link interrupts, etc, so we add
11446 * one to the number of vectors we are requesting.
11448 irq_cnt
= min_t(unsigned, irq_cnt
+ 1, tp
->irq_max
);
11454 static bool tg3_enable_msix(struct tg3
*tp
)
11457 struct msix_entry msix_ent
[TG3_IRQ_MAX_VECS
];
11459 tp
->txq_cnt
= tp
->txq_req
;
11460 tp
->rxq_cnt
= tp
->rxq_req
;
11462 tp
->rxq_cnt
= netif_get_num_default_rss_queues();
11463 if (tp
->rxq_cnt
> tp
->rxq_max
)
11464 tp
->rxq_cnt
= tp
->rxq_max
;
11466 /* Disable multiple TX rings by default. Simple round-robin hardware
11467 * scheduling of the TX rings can cause starvation of rings with
11468 * small packets when other rings have TSO or jumbo packets.
11473 tp
->irq_cnt
= tg3_irq_count(tp
);
11475 for (i
= 0; i
< tp
->irq_max
; i
++) {
11476 msix_ent
[i
].entry
= i
;
11477 msix_ent
[i
].vector
= 0;
11480 rc
= pci_enable_msix_range(tp
->pdev
, msix_ent
, 1, tp
->irq_cnt
);
11483 } else if (rc
< tp
->irq_cnt
) {
11484 netdev_notice(tp
->dev
, "Requested %d MSI-X vectors, received %d\n",
11487 tp
->rxq_cnt
= max(rc
- 1, 1);
11489 tp
->txq_cnt
= min(tp
->rxq_cnt
, tp
->txq_max
);
11492 for (i
= 0; i
< tp
->irq_max
; i
++)
11493 tp
->napi
[i
].irq_vec
= msix_ent
[i
].vector
;
11495 if (netif_set_real_num_rx_queues(tp
->dev
, tp
->rxq_cnt
)) {
11496 pci_disable_msix(tp
->pdev
);
11500 if (tp
->irq_cnt
== 1)
11503 tg3_flag_set(tp
, ENABLE_RSS
);
11505 if (tp
->txq_cnt
> 1)
11506 tg3_flag_set(tp
, ENABLE_TSS
);
11508 netif_set_real_num_tx_queues(tp
->dev
, tp
->txq_cnt
);
11513 static void tg3_ints_init(struct tg3
*tp
)
11515 if ((tg3_flag(tp
, SUPPORT_MSI
) || tg3_flag(tp
, SUPPORT_MSIX
)) &&
11516 !tg3_flag(tp
, TAGGED_STATUS
)) {
11517 /* All MSI supporting chips should support tagged
11518 * status. Assert that this is the case.
11520 netdev_warn(tp
->dev
,
11521 "MSI without TAGGED_STATUS? Not using MSI\n");
11525 if (tg3_flag(tp
, SUPPORT_MSIX
) && tg3_enable_msix(tp
))
11526 tg3_flag_set(tp
, USING_MSIX
);
11527 else if (tg3_flag(tp
, SUPPORT_MSI
) && pci_enable_msi(tp
->pdev
) == 0)
11528 tg3_flag_set(tp
, USING_MSI
);
11530 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
11531 u32 msi_mode
= tr32(MSGINT_MODE
);
11532 if (tg3_flag(tp
, USING_MSIX
) && tp
->irq_cnt
> 1)
11533 msi_mode
|= MSGINT_MODE_MULTIVEC_EN
;
11534 if (!tg3_flag(tp
, 1SHOT_MSI
))
11535 msi_mode
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
11536 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
11539 if (!tg3_flag(tp
, USING_MSIX
)) {
11541 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
11544 if (tp
->irq_cnt
== 1) {
11547 netif_set_real_num_tx_queues(tp
->dev
, 1);
11548 netif_set_real_num_rx_queues(tp
->dev
, 1);
11552 static void tg3_ints_fini(struct tg3
*tp
)
11554 if (tg3_flag(tp
, USING_MSIX
))
11555 pci_disable_msix(tp
->pdev
);
11556 else if (tg3_flag(tp
, USING_MSI
))
11557 pci_disable_msi(tp
->pdev
);
11558 tg3_flag_clear(tp
, USING_MSI
);
11559 tg3_flag_clear(tp
, USING_MSIX
);
11560 tg3_flag_clear(tp
, ENABLE_RSS
);
11561 tg3_flag_clear(tp
, ENABLE_TSS
);
11564 static int tg3_start(struct tg3
*tp
, bool reset_phy
, bool test_irq
,
11567 struct net_device
*dev
= tp
->dev
;
11571 * Setup interrupts first so we know how
11572 * many NAPI resources to allocate
11576 tg3_rss_check_indir_tbl(tp
);
11578 /* The placement of this call is tied
11579 * to the setup and use of Host TX descriptors.
11581 err
= tg3_alloc_consistent(tp
);
11583 goto out_ints_fini
;
11587 tg3_napi_enable(tp
);
11589 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
11590 err
= tg3_request_irq(tp
, i
);
11592 for (i
--; i
>= 0; i
--) {
11593 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
11595 free_irq(tnapi
->irq_vec
, tnapi
);
11597 goto out_napi_fini
;
11601 tg3_full_lock(tp
, 0);
11604 tg3_ape_driver_state_change(tp
, RESET_KIND_INIT
);
11606 err
= tg3_init_hw(tp
, reset_phy
);
11608 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11609 tg3_free_rings(tp
);
11612 tg3_full_unlock(tp
);
11617 if (test_irq
&& tg3_flag(tp
, USING_MSI
)) {
11618 err
= tg3_test_msi(tp
);
11621 tg3_full_lock(tp
, 0);
11622 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11623 tg3_free_rings(tp
);
11624 tg3_full_unlock(tp
);
11626 goto out_napi_fini
;
11629 if (!tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, USING_MSI
)) {
11630 u32 val
= tr32(PCIE_TRANSACTION_CFG
);
11632 tw32(PCIE_TRANSACTION_CFG
,
11633 val
| PCIE_TRANS_CFG_1SHOT_MSI
);
11639 tg3_hwmon_open(tp
);
11641 tg3_full_lock(tp
, 0);
11643 tg3_timer_start(tp
);
11644 tg3_flag_set(tp
, INIT_COMPLETE
);
11645 tg3_enable_ints(tp
);
11647 tg3_ptp_resume(tp
);
11649 tg3_full_unlock(tp
);
11651 netif_tx_start_all_queues(dev
);
11654 * Reset loopback feature if it was turned on while the device was down
11655 * make sure that it's installed properly now.
11657 if (dev
->features
& NETIF_F_LOOPBACK
)
11658 tg3_set_loopback(dev
, dev
->features
);
11663 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
11664 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
11665 free_irq(tnapi
->irq_vec
, tnapi
);
11669 tg3_napi_disable(tp
);
11671 tg3_free_consistent(tp
);
11679 static void tg3_stop(struct tg3
*tp
)
11683 tg3_reset_task_cancel(tp
);
11684 tg3_netif_stop(tp
);
11686 tg3_timer_stop(tp
);
11688 tg3_hwmon_close(tp
);
11692 tg3_full_lock(tp
, 1);
11694 tg3_disable_ints(tp
);
11696 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11697 tg3_free_rings(tp
);
11698 tg3_flag_clear(tp
, INIT_COMPLETE
);
11700 tg3_full_unlock(tp
);
11702 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
11703 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
11704 free_irq(tnapi
->irq_vec
, tnapi
);
11711 tg3_free_consistent(tp
);
11714 static int tg3_open(struct net_device
*dev
)
11716 struct tg3
*tp
= netdev_priv(dev
);
11719 if (tp
->pcierr_recovery
) {
11720 netdev_err(dev
, "Failed to open device. PCI error recovery "
11725 if (tp
->fw_needed
) {
11726 err
= tg3_request_firmware(tp
);
11727 if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
11729 netdev_warn(tp
->dev
, "EEE capability disabled\n");
11730 tp
->phy_flags
&= ~TG3_PHYFLG_EEE_CAP
;
11731 } else if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)) {
11732 netdev_warn(tp
->dev
, "EEE capability restored\n");
11733 tp
->phy_flags
|= TG3_PHYFLG_EEE_CAP
;
11735 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
) {
11739 netdev_warn(tp
->dev
, "TSO capability disabled\n");
11740 tg3_flag_clear(tp
, TSO_CAPABLE
);
11741 } else if (!tg3_flag(tp
, TSO_CAPABLE
)) {
11742 netdev_notice(tp
->dev
, "TSO capability restored\n");
11743 tg3_flag_set(tp
, TSO_CAPABLE
);
11747 tg3_carrier_off(tp
);
11749 err
= tg3_power_up(tp
);
11753 tg3_full_lock(tp
, 0);
11755 tg3_disable_ints(tp
);
11756 tg3_flag_clear(tp
, INIT_COMPLETE
);
11758 tg3_full_unlock(tp
);
11760 err
= tg3_start(tp
,
11761 !(tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
),
11764 tg3_frob_aux_power(tp
, false);
11765 pci_set_power_state(tp
->pdev
, PCI_D3hot
);
11771 static int tg3_close(struct net_device
*dev
)
11773 struct tg3
*tp
= netdev_priv(dev
);
11775 if (tp
->pcierr_recovery
) {
11776 netdev_err(dev
, "Failed to close device. PCI error recovery "
11783 if (pci_device_is_present(tp
->pdev
)) {
11784 tg3_power_down_prepare(tp
);
11786 tg3_carrier_off(tp
);
11791 static inline u64
get_stat64(tg3_stat64_t
*val
)
11793 return ((u64
)val
->high
<< 32) | ((u64
)val
->low
);
11796 static u64
tg3_calc_crc_errors(struct tg3
*tp
)
11798 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
11800 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
11801 (tg3_asic_rev(tp
) == ASIC_REV_5700
||
11802 tg3_asic_rev(tp
) == ASIC_REV_5701
)) {
11805 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &val
)) {
11806 tg3_writephy(tp
, MII_TG3_TEST1
,
11807 val
| MII_TG3_TEST1_CRC_EN
);
11808 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &val
);
11812 tp
->phy_crc_errors
+= val
;
11814 return tp
->phy_crc_errors
;
11817 return get_stat64(&hw_stats
->rx_fcs_errors
);
11820 #define ESTAT_ADD(member) \
11821 estats->member = old_estats->member + \
11822 get_stat64(&hw_stats->member)
11824 static void tg3_get_estats(struct tg3
*tp
, struct tg3_ethtool_stats
*estats
)
11826 struct tg3_ethtool_stats
*old_estats
= &tp
->estats_prev
;
11827 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
11829 ESTAT_ADD(rx_octets
);
11830 ESTAT_ADD(rx_fragments
);
11831 ESTAT_ADD(rx_ucast_packets
);
11832 ESTAT_ADD(rx_mcast_packets
);
11833 ESTAT_ADD(rx_bcast_packets
);
11834 ESTAT_ADD(rx_fcs_errors
);
11835 ESTAT_ADD(rx_align_errors
);
11836 ESTAT_ADD(rx_xon_pause_rcvd
);
11837 ESTAT_ADD(rx_xoff_pause_rcvd
);
11838 ESTAT_ADD(rx_mac_ctrl_rcvd
);
11839 ESTAT_ADD(rx_xoff_entered
);
11840 ESTAT_ADD(rx_frame_too_long_errors
);
11841 ESTAT_ADD(rx_jabbers
);
11842 ESTAT_ADD(rx_undersize_packets
);
11843 ESTAT_ADD(rx_in_length_errors
);
11844 ESTAT_ADD(rx_out_length_errors
);
11845 ESTAT_ADD(rx_64_or_less_octet_packets
);
11846 ESTAT_ADD(rx_65_to_127_octet_packets
);
11847 ESTAT_ADD(rx_128_to_255_octet_packets
);
11848 ESTAT_ADD(rx_256_to_511_octet_packets
);
11849 ESTAT_ADD(rx_512_to_1023_octet_packets
);
11850 ESTAT_ADD(rx_1024_to_1522_octet_packets
);
11851 ESTAT_ADD(rx_1523_to_2047_octet_packets
);
11852 ESTAT_ADD(rx_2048_to_4095_octet_packets
);
11853 ESTAT_ADD(rx_4096_to_8191_octet_packets
);
11854 ESTAT_ADD(rx_8192_to_9022_octet_packets
);
11856 ESTAT_ADD(tx_octets
);
11857 ESTAT_ADD(tx_collisions
);
11858 ESTAT_ADD(tx_xon_sent
);
11859 ESTAT_ADD(tx_xoff_sent
);
11860 ESTAT_ADD(tx_flow_control
);
11861 ESTAT_ADD(tx_mac_errors
);
11862 ESTAT_ADD(tx_single_collisions
);
11863 ESTAT_ADD(tx_mult_collisions
);
11864 ESTAT_ADD(tx_deferred
);
11865 ESTAT_ADD(tx_excessive_collisions
);
11866 ESTAT_ADD(tx_late_collisions
);
11867 ESTAT_ADD(tx_collide_2times
);
11868 ESTAT_ADD(tx_collide_3times
);
11869 ESTAT_ADD(tx_collide_4times
);
11870 ESTAT_ADD(tx_collide_5times
);
11871 ESTAT_ADD(tx_collide_6times
);
11872 ESTAT_ADD(tx_collide_7times
);
11873 ESTAT_ADD(tx_collide_8times
);
11874 ESTAT_ADD(tx_collide_9times
);
11875 ESTAT_ADD(tx_collide_10times
);
11876 ESTAT_ADD(tx_collide_11times
);
11877 ESTAT_ADD(tx_collide_12times
);
11878 ESTAT_ADD(tx_collide_13times
);
11879 ESTAT_ADD(tx_collide_14times
);
11880 ESTAT_ADD(tx_collide_15times
);
11881 ESTAT_ADD(tx_ucast_packets
);
11882 ESTAT_ADD(tx_mcast_packets
);
11883 ESTAT_ADD(tx_bcast_packets
);
11884 ESTAT_ADD(tx_carrier_sense_errors
);
11885 ESTAT_ADD(tx_discards
);
11886 ESTAT_ADD(tx_errors
);
11888 ESTAT_ADD(dma_writeq_full
);
11889 ESTAT_ADD(dma_write_prioq_full
);
11890 ESTAT_ADD(rxbds_empty
);
11891 ESTAT_ADD(rx_discards
);
11892 ESTAT_ADD(rx_errors
);
11893 ESTAT_ADD(rx_threshold_hit
);
11895 ESTAT_ADD(dma_readq_full
);
11896 ESTAT_ADD(dma_read_prioq_full
);
11897 ESTAT_ADD(tx_comp_queue_full
);
11899 ESTAT_ADD(ring_set_send_prod_index
);
11900 ESTAT_ADD(ring_status_update
);
11901 ESTAT_ADD(nic_irqs
);
11902 ESTAT_ADD(nic_avoided_irqs
);
11903 ESTAT_ADD(nic_tx_threshold_hit
);
11905 ESTAT_ADD(mbuf_lwm_thresh_hit
);
11908 static void tg3_get_nstats(struct tg3
*tp
, struct rtnl_link_stats64
*stats
)
11910 struct rtnl_link_stats64
*old_stats
= &tp
->net_stats_prev
;
11911 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
11913 stats
->rx_packets
= old_stats
->rx_packets
+
11914 get_stat64(&hw_stats
->rx_ucast_packets
) +
11915 get_stat64(&hw_stats
->rx_mcast_packets
) +
11916 get_stat64(&hw_stats
->rx_bcast_packets
);
11918 stats
->tx_packets
= old_stats
->tx_packets
+
11919 get_stat64(&hw_stats
->tx_ucast_packets
) +
11920 get_stat64(&hw_stats
->tx_mcast_packets
) +
11921 get_stat64(&hw_stats
->tx_bcast_packets
);
11923 stats
->rx_bytes
= old_stats
->rx_bytes
+
11924 get_stat64(&hw_stats
->rx_octets
);
11925 stats
->tx_bytes
= old_stats
->tx_bytes
+
11926 get_stat64(&hw_stats
->tx_octets
);
11928 stats
->rx_errors
= old_stats
->rx_errors
+
11929 get_stat64(&hw_stats
->rx_errors
);
11930 stats
->tx_errors
= old_stats
->tx_errors
+
11931 get_stat64(&hw_stats
->tx_errors
) +
11932 get_stat64(&hw_stats
->tx_mac_errors
) +
11933 get_stat64(&hw_stats
->tx_carrier_sense_errors
) +
11934 get_stat64(&hw_stats
->tx_discards
);
11936 stats
->multicast
= old_stats
->multicast
+
11937 get_stat64(&hw_stats
->rx_mcast_packets
);
11938 stats
->collisions
= old_stats
->collisions
+
11939 get_stat64(&hw_stats
->tx_collisions
);
11941 stats
->rx_length_errors
= old_stats
->rx_length_errors
+
11942 get_stat64(&hw_stats
->rx_frame_too_long_errors
) +
11943 get_stat64(&hw_stats
->rx_undersize_packets
);
11945 stats
->rx_frame_errors
= old_stats
->rx_frame_errors
+
11946 get_stat64(&hw_stats
->rx_align_errors
);
11947 stats
->tx_aborted_errors
= old_stats
->tx_aborted_errors
+
11948 get_stat64(&hw_stats
->tx_discards
);
11949 stats
->tx_carrier_errors
= old_stats
->tx_carrier_errors
+
11950 get_stat64(&hw_stats
->tx_carrier_sense_errors
);
11952 stats
->rx_crc_errors
= old_stats
->rx_crc_errors
+
11953 tg3_calc_crc_errors(tp
);
11955 stats
->rx_missed_errors
= old_stats
->rx_missed_errors
+
11956 get_stat64(&hw_stats
->rx_discards
);
11958 stats
->rx_dropped
= tp
->rx_dropped
;
11959 stats
->tx_dropped
= tp
->tx_dropped
;
11962 static int tg3_get_regs_len(struct net_device
*dev
)
11964 return TG3_REG_BLK_SIZE
;
11967 static void tg3_get_regs(struct net_device
*dev
,
11968 struct ethtool_regs
*regs
, void *_p
)
11970 struct tg3
*tp
= netdev_priv(dev
);
11974 memset(_p
, 0, TG3_REG_BLK_SIZE
);
11976 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11979 tg3_full_lock(tp
, 0);
11981 tg3_dump_legacy_regs(tp
, (u32
*)_p
);
11983 tg3_full_unlock(tp
);
11986 static int tg3_get_eeprom_len(struct net_device
*dev
)
11988 struct tg3
*tp
= netdev_priv(dev
);
11990 return tp
->nvram_size
;
11993 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11995 struct tg3
*tp
= netdev_priv(dev
);
11996 int ret
, cpmu_restore
= 0;
11998 u32 i
, offset
, len
, b_offset
, b_count
, cpmu_val
= 0;
12001 if (tg3_flag(tp
, NO_NVRAM
))
12004 offset
= eeprom
->offset
;
12008 eeprom
->magic
= TG3_EEPROM_MAGIC
;
12010 /* Override clock, link aware and link idle modes */
12011 if (tg3_flag(tp
, CPMU_PRESENT
)) {
12012 cpmu_val
= tr32(TG3_CPMU_CTRL
);
12013 if (cpmu_val
& (CPMU_CTRL_LINK_AWARE_MODE
|
12014 CPMU_CTRL_LINK_IDLE_MODE
)) {
12015 tw32(TG3_CPMU_CTRL
, cpmu_val
&
12016 ~(CPMU_CTRL_LINK_AWARE_MODE
|
12017 CPMU_CTRL_LINK_IDLE_MODE
));
12021 tg3_override_clk(tp
);
12024 /* adjustments to start on required 4 byte boundary */
12025 b_offset
= offset
& 3;
12026 b_count
= 4 - b_offset
;
12027 if (b_count
> len
) {
12028 /* i.e. offset=1 len=2 */
12031 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &val
);
12034 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
12037 eeprom
->len
+= b_count
;
12040 /* read bytes up to the last 4 byte boundary */
12041 pd
= &data
[eeprom
->len
];
12042 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
12043 ret
= tg3_nvram_read_be32(tp
, offset
+ i
, &val
);
12050 memcpy(pd
+ i
, &val
, 4);
12051 if (need_resched()) {
12052 if (signal_pending(current
)) {
12063 /* read last bytes not ending on 4 byte boundary */
12064 pd
= &data
[eeprom
->len
];
12066 b_offset
= offset
+ len
- b_count
;
12067 ret
= tg3_nvram_read_be32(tp
, b_offset
, &val
);
12070 memcpy(pd
, &val
, b_count
);
12071 eeprom
->len
+= b_count
;
12076 /* Restore clock, link aware and link idle modes */
12077 tg3_restore_clk(tp
);
12079 tw32(TG3_CPMU_CTRL
, cpmu_val
);
12084 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
12086 struct tg3
*tp
= netdev_priv(dev
);
12088 u32 offset
, len
, b_offset
, odd_len
;
12090 __be32 start
= 0, end
;
12092 if (tg3_flag(tp
, NO_NVRAM
) ||
12093 eeprom
->magic
!= TG3_EEPROM_MAGIC
)
12096 offset
= eeprom
->offset
;
12099 if ((b_offset
= (offset
& 3))) {
12100 /* adjustments to start on required 4 byte boundary */
12101 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &start
);
12112 /* adjustments to end on required 4 byte boundary */
12114 len
= (len
+ 3) & ~3;
12115 ret
= tg3_nvram_read_be32(tp
, offset
+len
-4, &end
);
12121 if (b_offset
|| odd_len
) {
12122 buf
= kmalloc(len
, GFP_KERNEL
);
12126 memcpy(buf
, &start
, 4);
12128 memcpy(buf
+len
-4, &end
, 4);
12129 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
12132 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
12140 static int tg3_get_link_ksettings(struct net_device
*dev
,
12141 struct ethtool_link_ksettings
*cmd
)
12143 struct tg3
*tp
= netdev_priv(dev
);
12144 u32 supported
, advertising
;
12146 if (tg3_flag(tp
, USE_PHYLIB
)) {
12147 struct phy_device
*phydev
;
12148 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
12150 phydev
= mdiobus_get_phy(tp
->mdio_bus
, tp
->phy_addr
);
12151 phy_ethtool_ksettings_get(phydev
, cmd
);
12156 supported
= (SUPPORTED_Autoneg
);
12158 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
12159 supported
|= (SUPPORTED_1000baseT_Half
|
12160 SUPPORTED_1000baseT_Full
);
12162 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
12163 supported
|= (SUPPORTED_100baseT_Half
|
12164 SUPPORTED_100baseT_Full
|
12165 SUPPORTED_10baseT_Half
|
12166 SUPPORTED_10baseT_Full
|
12168 cmd
->base
.port
= PORT_TP
;
12170 supported
|= SUPPORTED_FIBRE
;
12171 cmd
->base
.port
= PORT_FIBRE
;
12173 ethtool_convert_legacy_u32_to_link_mode(cmd
->link_modes
.supported
,
12176 advertising
= tp
->link_config
.advertising
;
12177 if (tg3_flag(tp
, PAUSE_AUTONEG
)) {
12178 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
) {
12179 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
12180 advertising
|= ADVERTISED_Pause
;
12182 advertising
|= ADVERTISED_Pause
|
12183 ADVERTISED_Asym_Pause
;
12185 } else if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
12186 advertising
|= ADVERTISED_Asym_Pause
;
12189 ethtool_convert_legacy_u32_to_link_mode(cmd
->link_modes
.advertising
,
12192 if (netif_running(dev
) && tp
->link_up
) {
12193 cmd
->base
.speed
= tp
->link_config
.active_speed
;
12194 cmd
->base
.duplex
= tp
->link_config
.active_duplex
;
12195 ethtool_convert_legacy_u32_to_link_mode(
12196 cmd
->link_modes
.lp_advertising
,
12197 tp
->link_config
.rmt_adv
);
12199 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
12200 if (tp
->phy_flags
& TG3_PHYFLG_MDIX_STATE
)
12201 cmd
->base
.eth_tp_mdix
= ETH_TP_MDI_X
;
12203 cmd
->base
.eth_tp_mdix
= ETH_TP_MDI
;
12206 cmd
->base
.speed
= SPEED_UNKNOWN
;
12207 cmd
->base
.duplex
= DUPLEX_UNKNOWN
;
12208 cmd
->base
.eth_tp_mdix
= ETH_TP_MDI_INVALID
;
12210 cmd
->base
.phy_address
= tp
->phy_addr
;
12211 cmd
->base
.autoneg
= tp
->link_config
.autoneg
;
12215 static int tg3_set_link_ksettings(struct net_device
*dev
,
12216 const struct ethtool_link_ksettings
*cmd
)
12218 struct tg3
*tp
= netdev_priv(dev
);
12219 u32 speed
= cmd
->base
.speed
;
12222 if (tg3_flag(tp
, USE_PHYLIB
)) {
12223 struct phy_device
*phydev
;
12224 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
12226 phydev
= mdiobus_get_phy(tp
->mdio_bus
, tp
->phy_addr
);
12227 return phy_ethtool_ksettings_set(phydev
, cmd
);
12230 if (cmd
->base
.autoneg
!= AUTONEG_ENABLE
&&
12231 cmd
->base
.autoneg
!= AUTONEG_DISABLE
)
12234 if (cmd
->base
.autoneg
== AUTONEG_DISABLE
&&
12235 cmd
->base
.duplex
!= DUPLEX_FULL
&&
12236 cmd
->base
.duplex
!= DUPLEX_HALF
)
12239 ethtool_convert_link_mode_to_legacy_u32(&advertising
,
12240 cmd
->link_modes
.advertising
);
12242 if (cmd
->base
.autoneg
== AUTONEG_ENABLE
) {
12243 u32 mask
= ADVERTISED_Autoneg
|
12245 ADVERTISED_Asym_Pause
;
12247 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
12248 mask
|= ADVERTISED_1000baseT_Half
|
12249 ADVERTISED_1000baseT_Full
;
12251 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
12252 mask
|= ADVERTISED_100baseT_Half
|
12253 ADVERTISED_100baseT_Full
|
12254 ADVERTISED_10baseT_Half
|
12255 ADVERTISED_10baseT_Full
|
12258 mask
|= ADVERTISED_FIBRE
;
12260 if (advertising
& ~mask
)
12263 mask
&= (ADVERTISED_1000baseT_Half
|
12264 ADVERTISED_1000baseT_Full
|
12265 ADVERTISED_100baseT_Half
|
12266 ADVERTISED_100baseT_Full
|
12267 ADVERTISED_10baseT_Half
|
12268 ADVERTISED_10baseT_Full
);
12270 advertising
&= mask
;
12272 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) {
12273 if (speed
!= SPEED_1000
)
12276 if (cmd
->base
.duplex
!= DUPLEX_FULL
)
12279 if (speed
!= SPEED_100
&&
12285 tg3_full_lock(tp
, 0);
12287 tp
->link_config
.autoneg
= cmd
->base
.autoneg
;
12288 if (cmd
->base
.autoneg
== AUTONEG_ENABLE
) {
12289 tp
->link_config
.advertising
= (advertising
|
12290 ADVERTISED_Autoneg
);
12291 tp
->link_config
.speed
= SPEED_UNKNOWN
;
12292 tp
->link_config
.duplex
= DUPLEX_UNKNOWN
;
12294 tp
->link_config
.advertising
= 0;
12295 tp
->link_config
.speed
= speed
;
12296 tp
->link_config
.duplex
= cmd
->base
.duplex
;
12299 tp
->phy_flags
|= TG3_PHYFLG_USER_CONFIGURED
;
12301 tg3_warn_mgmt_link_flap(tp
);
12303 if (netif_running(dev
))
12304 tg3_setup_phy(tp
, true);
12306 tg3_full_unlock(tp
);
12311 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
12313 struct tg3
*tp
= netdev_priv(dev
);
12315 strlcpy(info
->driver
, DRV_MODULE_NAME
, sizeof(info
->driver
));
12316 strlcpy(info
->version
, DRV_MODULE_VERSION
, sizeof(info
->version
));
12317 strlcpy(info
->fw_version
, tp
->fw_ver
, sizeof(info
->fw_version
));
12318 strlcpy(info
->bus_info
, pci_name(tp
->pdev
), sizeof(info
->bus_info
));
12321 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
12323 struct tg3
*tp
= netdev_priv(dev
);
12325 if (tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(&tp
->pdev
->dev
))
12326 wol
->supported
= WAKE_MAGIC
;
12328 wol
->supported
= 0;
12330 if (tg3_flag(tp
, WOL_ENABLE
) && device_can_wakeup(&tp
->pdev
->dev
))
12331 wol
->wolopts
= WAKE_MAGIC
;
12332 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
12335 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
12337 struct tg3
*tp
= netdev_priv(dev
);
12338 struct device
*dp
= &tp
->pdev
->dev
;
12340 if (wol
->wolopts
& ~WAKE_MAGIC
)
12342 if ((wol
->wolopts
& WAKE_MAGIC
) &&
12343 !(tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(dp
)))
12346 device_set_wakeup_enable(dp
, wol
->wolopts
& WAKE_MAGIC
);
12348 if (device_may_wakeup(dp
))
12349 tg3_flag_set(tp
, WOL_ENABLE
);
12351 tg3_flag_clear(tp
, WOL_ENABLE
);
12356 static u32
tg3_get_msglevel(struct net_device
*dev
)
12358 struct tg3
*tp
= netdev_priv(dev
);
12359 return tp
->msg_enable
;
12362 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
12364 struct tg3
*tp
= netdev_priv(dev
);
12365 tp
->msg_enable
= value
;
12368 static int tg3_nway_reset(struct net_device
*dev
)
12370 struct tg3
*tp
= netdev_priv(dev
);
12373 if (!netif_running(dev
))
12376 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
12379 tg3_warn_mgmt_link_flap(tp
);
12381 if (tg3_flag(tp
, USE_PHYLIB
)) {
12382 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
12384 r
= phy_start_aneg(mdiobus_get_phy(tp
->mdio_bus
, tp
->phy_addr
));
12388 spin_lock_bh(&tp
->lock
);
12390 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
12391 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
12392 ((bmcr
& BMCR_ANENABLE
) ||
12393 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
))) {
12394 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
12398 spin_unlock_bh(&tp
->lock
);
12404 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
12406 struct tg3
*tp
= netdev_priv(dev
);
12408 ering
->rx_max_pending
= tp
->rx_std_ring_mask
;
12409 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
12410 ering
->rx_jumbo_max_pending
= tp
->rx_jmb_ring_mask
;
12412 ering
->rx_jumbo_max_pending
= 0;
12414 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
12416 ering
->rx_pending
= tp
->rx_pending
;
12417 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
12418 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
12420 ering
->rx_jumbo_pending
= 0;
12422 ering
->tx_pending
= tp
->napi
[0].tx_pending
;
12425 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
12427 struct tg3
*tp
= netdev_priv(dev
);
12428 int i
, irq_sync
= 0, err
= 0;
12429 bool reset_phy
= false;
12431 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
12432 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
12433 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
12434 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
12435 (tg3_flag(tp
, TSO_BUG
) &&
12436 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
12439 if (netif_running(dev
)) {
12441 tg3_netif_stop(tp
);
12445 tg3_full_lock(tp
, irq_sync
);
12447 tp
->rx_pending
= ering
->rx_pending
;
12449 if (tg3_flag(tp
, MAX_RXPEND_64
) &&
12450 tp
->rx_pending
> 63)
12451 tp
->rx_pending
= 63;
12453 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
12454 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
12456 for (i
= 0; i
< tp
->irq_max
; i
++)
12457 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
12459 if (netif_running(dev
)) {
12460 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12461 /* Reset PHY to avoid PHY lock up */
12462 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
12463 tg3_asic_rev(tp
) == ASIC_REV_5719
||
12464 tg3_asic_rev(tp
) == ASIC_REV_5720
)
12467 err
= tg3_restart_hw(tp
, reset_phy
);
12469 tg3_netif_start(tp
);
12472 tg3_full_unlock(tp
);
12474 if (irq_sync
&& !err
)
12480 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
12482 struct tg3
*tp
= netdev_priv(dev
);
12484 epause
->autoneg
= !!tg3_flag(tp
, PAUSE_AUTONEG
);
12486 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
)
12487 epause
->rx_pause
= 1;
12489 epause
->rx_pause
= 0;
12491 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
)
12492 epause
->tx_pause
= 1;
12494 epause
->tx_pause
= 0;
12497 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
12499 struct tg3
*tp
= netdev_priv(dev
);
12501 bool reset_phy
= false;
12503 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)
12504 tg3_warn_mgmt_link_flap(tp
);
12506 if (tg3_flag(tp
, USE_PHYLIB
)) {
12508 struct phy_device
*phydev
;
12510 phydev
= mdiobus_get_phy(tp
->mdio_bus
, tp
->phy_addr
);
12512 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
12513 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
12514 (epause
->rx_pause
!= epause
->tx_pause
)))
12517 tp
->link_config
.flowctrl
= 0;
12518 if (epause
->rx_pause
) {
12519 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
12521 if (epause
->tx_pause
) {
12522 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12523 newadv
= ADVERTISED_Pause
;
12525 newadv
= ADVERTISED_Pause
|
12526 ADVERTISED_Asym_Pause
;
12527 } else if (epause
->tx_pause
) {
12528 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12529 newadv
= ADVERTISED_Asym_Pause
;
12533 if (epause
->autoneg
)
12534 tg3_flag_set(tp
, PAUSE_AUTONEG
);
12536 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
12538 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
12539 u32 oldadv
= phydev
->advertising
&
12540 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
12541 if (oldadv
!= newadv
) {
12542 phydev
->advertising
&=
12543 ~(ADVERTISED_Pause
|
12544 ADVERTISED_Asym_Pause
);
12545 phydev
->advertising
|= newadv
;
12546 if (phydev
->autoneg
) {
12548 * Always renegotiate the link to
12549 * inform our link partner of our
12550 * flow control settings, even if the
12551 * flow control is forced. Let
12552 * tg3_adjust_link() do the final
12553 * flow control setup.
12555 return phy_start_aneg(phydev
);
12559 if (!epause
->autoneg
)
12560 tg3_setup_flow_control(tp
, 0, 0);
12562 tp
->link_config
.advertising
&=
12563 ~(ADVERTISED_Pause
|
12564 ADVERTISED_Asym_Pause
);
12565 tp
->link_config
.advertising
|= newadv
;
12570 if (netif_running(dev
)) {
12571 tg3_netif_stop(tp
);
12575 tg3_full_lock(tp
, irq_sync
);
12577 if (epause
->autoneg
)
12578 tg3_flag_set(tp
, PAUSE_AUTONEG
);
12580 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
12581 if (epause
->rx_pause
)
12582 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
12584 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_RX
;
12585 if (epause
->tx_pause
)
12586 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12588 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_TX
;
12590 if (netif_running(dev
)) {
12591 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12592 /* Reset PHY to avoid PHY lock up */
12593 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
12594 tg3_asic_rev(tp
) == ASIC_REV_5719
||
12595 tg3_asic_rev(tp
) == ASIC_REV_5720
)
12598 err
= tg3_restart_hw(tp
, reset_phy
);
12600 tg3_netif_start(tp
);
12603 tg3_full_unlock(tp
);
12606 tp
->phy_flags
|= TG3_PHYFLG_USER_CONFIGURED
;
12611 static int tg3_get_sset_count(struct net_device
*dev
, int sset
)
12615 return TG3_NUM_TEST
;
12617 return TG3_NUM_STATS
;
12619 return -EOPNOTSUPP
;
12623 static int tg3_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
,
12624 u32
*rules __always_unused
)
12626 struct tg3
*tp
= netdev_priv(dev
);
12628 if (!tg3_flag(tp
, SUPPORT_MSIX
))
12629 return -EOPNOTSUPP
;
12631 switch (info
->cmd
) {
12632 case ETHTOOL_GRXRINGS
:
12633 if (netif_running(tp
->dev
))
12634 info
->data
= tp
->rxq_cnt
;
12636 info
->data
= num_online_cpus();
12637 if (info
->data
> TG3_RSS_MAX_NUM_QS
)
12638 info
->data
= TG3_RSS_MAX_NUM_QS
;
12644 return -EOPNOTSUPP
;
12648 static u32
tg3_get_rxfh_indir_size(struct net_device
*dev
)
12651 struct tg3
*tp
= netdev_priv(dev
);
12653 if (tg3_flag(tp
, SUPPORT_MSIX
))
12654 size
= TG3_RSS_INDIR_TBL_SIZE
;
12659 static int tg3_get_rxfh(struct net_device
*dev
, u32
*indir
, u8
*key
, u8
*hfunc
)
12661 struct tg3
*tp
= netdev_priv(dev
);
12665 *hfunc
= ETH_RSS_HASH_TOP
;
12669 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
12670 indir
[i
] = tp
->rss_ind_tbl
[i
];
12675 static int tg3_set_rxfh(struct net_device
*dev
, const u32
*indir
, const u8
*key
,
12678 struct tg3
*tp
= netdev_priv(dev
);
12681 /* We require at least one supported parameter to be changed and no
12682 * change in any of the unsupported parameters
12685 (hfunc
!= ETH_RSS_HASH_NO_CHANGE
&& hfunc
!= ETH_RSS_HASH_TOP
))
12686 return -EOPNOTSUPP
;
12691 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
12692 tp
->rss_ind_tbl
[i
] = indir
[i
];
12694 if (!netif_running(dev
) || !tg3_flag(tp
, ENABLE_RSS
))
12697 /* It is legal to write the indirection
12698 * table while the device is running.
12700 tg3_full_lock(tp
, 0);
12701 tg3_rss_write_indir_tbl(tp
);
12702 tg3_full_unlock(tp
);
12707 static void tg3_get_channels(struct net_device
*dev
,
12708 struct ethtool_channels
*channel
)
12710 struct tg3
*tp
= netdev_priv(dev
);
12711 u32 deflt_qs
= netif_get_num_default_rss_queues();
12713 channel
->max_rx
= tp
->rxq_max
;
12714 channel
->max_tx
= tp
->txq_max
;
12716 if (netif_running(dev
)) {
12717 channel
->rx_count
= tp
->rxq_cnt
;
12718 channel
->tx_count
= tp
->txq_cnt
;
12721 channel
->rx_count
= tp
->rxq_req
;
12723 channel
->rx_count
= min(deflt_qs
, tp
->rxq_max
);
12726 channel
->tx_count
= tp
->txq_req
;
12728 channel
->tx_count
= min(deflt_qs
, tp
->txq_max
);
12732 static int tg3_set_channels(struct net_device
*dev
,
12733 struct ethtool_channels
*channel
)
12735 struct tg3
*tp
= netdev_priv(dev
);
12737 if (!tg3_flag(tp
, SUPPORT_MSIX
))
12738 return -EOPNOTSUPP
;
12740 if (channel
->rx_count
> tp
->rxq_max
||
12741 channel
->tx_count
> tp
->txq_max
)
12744 tp
->rxq_req
= channel
->rx_count
;
12745 tp
->txq_req
= channel
->tx_count
;
12747 if (!netif_running(dev
))
12752 tg3_carrier_off(tp
);
12754 tg3_start(tp
, true, false, false);
12759 static void tg3_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
12761 switch (stringset
) {
12763 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
12766 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
12769 WARN_ON(1); /* we need a WARN() */
12774 static int tg3_set_phys_id(struct net_device
*dev
,
12775 enum ethtool_phys_id_state state
)
12777 struct tg3
*tp
= netdev_priv(dev
);
12779 if (!netif_running(tp
->dev
))
12783 case ETHTOOL_ID_ACTIVE
:
12784 return 1; /* cycle on/off once per second */
12786 case ETHTOOL_ID_ON
:
12787 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
12788 LED_CTRL_1000MBPS_ON
|
12789 LED_CTRL_100MBPS_ON
|
12790 LED_CTRL_10MBPS_ON
|
12791 LED_CTRL_TRAFFIC_OVERRIDE
|
12792 LED_CTRL_TRAFFIC_BLINK
|
12793 LED_CTRL_TRAFFIC_LED
);
12796 case ETHTOOL_ID_OFF
:
12797 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
12798 LED_CTRL_TRAFFIC_OVERRIDE
);
12801 case ETHTOOL_ID_INACTIVE
:
12802 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
12809 static void tg3_get_ethtool_stats(struct net_device
*dev
,
12810 struct ethtool_stats
*estats
, u64
*tmp_stats
)
12812 struct tg3
*tp
= netdev_priv(dev
);
12815 tg3_get_estats(tp
, (struct tg3_ethtool_stats
*)tmp_stats
);
12817 memset(tmp_stats
, 0, sizeof(struct tg3_ethtool_stats
));
12820 static __be32
*tg3_vpd_readblock(struct tg3
*tp
, u32
*vpdlen
)
12824 u32 offset
= 0, len
= 0;
12827 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &magic
))
12830 if (magic
== TG3_EEPROM_MAGIC
) {
12831 for (offset
= TG3_NVM_DIR_START
;
12832 offset
< TG3_NVM_DIR_END
;
12833 offset
+= TG3_NVM_DIRENT_SIZE
) {
12834 if (tg3_nvram_read(tp
, offset
, &val
))
12837 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) ==
12838 TG3_NVM_DIRTYPE_EXTVPD
)
12842 if (offset
!= TG3_NVM_DIR_END
) {
12843 len
= (val
& TG3_NVM_DIRTYPE_LENMSK
) * 4;
12844 if (tg3_nvram_read(tp
, offset
+ 4, &offset
))
12847 offset
= tg3_nvram_logical_addr(tp
, offset
);
12851 if (!offset
|| !len
) {
12852 offset
= TG3_NVM_VPD_OFF
;
12853 len
= TG3_NVM_VPD_LEN
;
12856 buf
= kmalloc(len
, GFP_KERNEL
);
12860 if (magic
== TG3_EEPROM_MAGIC
) {
12861 for (i
= 0; i
< len
; i
+= 4) {
12862 /* The data is in little-endian format in NVRAM.
12863 * Use the big-endian read routines to preserve
12864 * the byte order as it exists in NVRAM.
12866 if (tg3_nvram_read_be32(tp
, offset
+ i
, &buf
[i
/4]))
12872 unsigned int pos
= 0;
12874 ptr
= (u8
*)&buf
[0];
12875 for (i
= 0; pos
< len
&& i
< 3; i
++, pos
+= cnt
, ptr
+= cnt
) {
12876 cnt
= pci_read_vpd(tp
->pdev
, pos
,
12878 if (cnt
== -ETIMEDOUT
|| cnt
== -EINTR
)
12896 #define NVRAM_TEST_SIZE 0x100
12897 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12898 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12899 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12900 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12901 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12902 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12903 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12904 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12906 static int tg3_test_nvram(struct tg3
*tp
)
12908 u32 csum
, magic
, len
;
12910 int i
, j
, k
, err
= 0, size
;
12912 if (tg3_flag(tp
, NO_NVRAM
))
12915 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
12918 if (magic
== TG3_EEPROM_MAGIC
)
12919 size
= NVRAM_TEST_SIZE
;
12920 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
12921 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
12922 TG3_EEPROM_SB_FORMAT_1
) {
12923 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
12924 case TG3_EEPROM_SB_REVISION_0
:
12925 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
12927 case TG3_EEPROM_SB_REVISION_2
:
12928 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
12930 case TG3_EEPROM_SB_REVISION_3
:
12931 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
12933 case TG3_EEPROM_SB_REVISION_4
:
12934 size
= NVRAM_SELFBOOT_FORMAT1_4_SIZE
;
12936 case TG3_EEPROM_SB_REVISION_5
:
12937 size
= NVRAM_SELFBOOT_FORMAT1_5_SIZE
;
12939 case TG3_EEPROM_SB_REVISION_6
:
12940 size
= NVRAM_SELFBOOT_FORMAT1_6_SIZE
;
12947 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
12948 size
= NVRAM_SELFBOOT_HW_SIZE
;
12952 buf
= kmalloc(size
, GFP_KERNEL
);
12957 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
12958 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
12965 /* Selfboot format */
12966 magic
= be32_to_cpu(buf
[0]);
12967 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
12968 TG3_EEPROM_MAGIC_FW
) {
12969 u8
*buf8
= (u8
*) buf
, csum8
= 0;
12971 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
12972 TG3_EEPROM_SB_REVISION_2
) {
12973 /* For rev 2, the csum doesn't include the MBA. */
12974 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
12976 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
12979 for (i
= 0; i
< size
; i
++)
12992 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
12993 TG3_EEPROM_MAGIC_HW
) {
12994 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
12995 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
12996 u8
*buf8
= (u8
*) buf
;
12998 /* Separate the parity bits and the data bytes. */
12999 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
13000 if ((i
== 0) || (i
== 8)) {
13004 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
13005 parity
[k
++] = buf8
[i
] & msk
;
13007 } else if (i
== 16) {
13011 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
13012 parity
[k
++] = buf8
[i
] & msk
;
13015 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
13016 parity
[k
++] = buf8
[i
] & msk
;
13019 data
[j
++] = buf8
[i
];
13023 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
13024 u8 hw8
= hweight8(data
[i
]);
13026 if ((hw8
& 0x1) && parity
[i
])
13028 else if (!(hw8
& 0x1) && !parity
[i
])
13037 /* Bootstrap checksum at offset 0x10 */
13038 csum
= calc_crc((unsigned char *) buf
, 0x10);
13039 if (csum
!= le32_to_cpu(buf
[0x10/4]))
13042 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13043 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
13044 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
13049 buf
= tg3_vpd_readblock(tp
, &len
);
13053 i
= pci_vpd_find_tag((u8
*)buf
, 0, len
, PCI_VPD_LRDT_RO_DATA
);
13055 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
13059 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> len
)
13062 i
+= PCI_VPD_LRDT_TAG_SIZE
;
13063 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
13064 PCI_VPD_RO_KEYWORD_CHKSUM
);
13068 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
13070 for (i
= 0; i
<= j
; i
++)
13071 csum8
+= ((u8
*)buf
)[i
];
13085 #define TG3_SERDES_TIMEOUT_SEC 2
13086 #define TG3_COPPER_TIMEOUT_SEC 6
13088 static int tg3_test_link(struct tg3
*tp
)
13092 if (!netif_running(tp
->dev
))
13095 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
13096 max
= TG3_SERDES_TIMEOUT_SEC
;
13098 max
= TG3_COPPER_TIMEOUT_SEC
;
13100 for (i
= 0; i
< max
; i
++) {
13104 if (msleep_interruptible(1000))
13111 /* Only test the commonly used registers */
13112 static int tg3_test_registers(struct tg3
*tp
)
13114 int i
, is_5705
, is_5750
;
13115 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
13119 #define TG3_FL_5705 0x1
13120 #define TG3_FL_NOT_5705 0x2
13121 #define TG3_FL_NOT_5788 0x4
13122 #define TG3_FL_NOT_5750 0x8
13126 /* MAC Control Registers */
13127 { MAC_MODE
, TG3_FL_NOT_5705
,
13128 0x00000000, 0x00ef6f8c },
13129 { MAC_MODE
, TG3_FL_5705
,
13130 0x00000000, 0x01ef6b8c },
13131 { MAC_STATUS
, TG3_FL_NOT_5705
,
13132 0x03800107, 0x00000000 },
13133 { MAC_STATUS
, TG3_FL_5705
,
13134 0x03800100, 0x00000000 },
13135 { MAC_ADDR_0_HIGH
, 0x0000,
13136 0x00000000, 0x0000ffff },
13137 { MAC_ADDR_0_LOW
, 0x0000,
13138 0x00000000, 0xffffffff },
13139 { MAC_RX_MTU_SIZE
, 0x0000,
13140 0x00000000, 0x0000ffff },
13141 { MAC_TX_MODE
, 0x0000,
13142 0x00000000, 0x00000070 },
13143 { MAC_TX_LENGTHS
, 0x0000,
13144 0x00000000, 0x00003fff },
13145 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
13146 0x00000000, 0x000007fc },
13147 { MAC_RX_MODE
, TG3_FL_5705
,
13148 0x00000000, 0x000007dc },
13149 { MAC_HASH_REG_0
, 0x0000,
13150 0x00000000, 0xffffffff },
13151 { MAC_HASH_REG_1
, 0x0000,
13152 0x00000000, 0xffffffff },
13153 { MAC_HASH_REG_2
, 0x0000,
13154 0x00000000, 0xffffffff },
13155 { MAC_HASH_REG_3
, 0x0000,
13156 0x00000000, 0xffffffff },
13158 /* Receive Data and Receive BD Initiator Control Registers. */
13159 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
13160 0x00000000, 0xffffffff },
13161 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
13162 0x00000000, 0xffffffff },
13163 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
13164 0x00000000, 0x00000003 },
13165 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
13166 0x00000000, 0xffffffff },
13167 { RCVDBDI_STD_BD
+0, 0x0000,
13168 0x00000000, 0xffffffff },
13169 { RCVDBDI_STD_BD
+4, 0x0000,
13170 0x00000000, 0xffffffff },
13171 { RCVDBDI_STD_BD
+8, 0x0000,
13172 0x00000000, 0xffff0002 },
13173 { RCVDBDI_STD_BD
+0xc, 0x0000,
13174 0x00000000, 0xffffffff },
13176 /* Receive BD Initiator Control Registers. */
13177 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
13178 0x00000000, 0xffffffff },
13179 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
13180 0x00000000, 0x000003ff },
13181 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
13182 0x00000000, 0xffffffff },
13184 /* Host Coalescing Control Registers. */
13185 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
13186 0x00000000, 0x00000004 },
13187 { HOSTCC_MODE
, TG3_FL_5705
,
13188 0x00000000, 0x000000f6 },
13189 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
13190 0x00000000, 0xffffffff },
13191 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
13192 0x00000000, 0x000003ff },
13193 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
13194 0x00000000, 0xffffffff },
13195 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
13196 0x00000000, 0x000003ff },
13197 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
13198 0x00000000, 0xffffffff },
13199 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
13200 0x00000000, 0x000000ff },
13201 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
13202 0x00000000, 0xffffffff },
13203 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
13204 0x00000000, 0x000000ff },
13205 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
13206 0x00000000, 0xffffffff },
13207 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
13208 0x00000000, 0xffffffff },
13209 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
13210 0x00000000, 0xffffffff },
13211 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
13212 0x00000000, 0x000000ff },
13213 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
13214 0x00000000, 0xffffffff },
13215 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
13216 0x00000000, 0x000000ff },
13217 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
13218 0x00000000, 0xffffffff },
13219 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
13220 0x00000000, 0xffffffff },
13221 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
13222 0x00000000, 0xffffffff },
13223 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
13224 0x00000000, 0xffffffff },
13225 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
13226 0x00000000, 0xffffffff },
13227 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
13228 0xffffffff, 0x00000000 },
13229 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
13230 0xffffffff, 0x00000000 },
13232 /* Buffer Manager Control Registers. */
13233 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
13234 0x00000000, 0x007fff80 },
13235 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
13236 0x00000000, 0x007fffff },
13237 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
13238 0x00000000, 0x0000003f },
13239 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
13240 0x00000000, 0x000001ff },
13241 { BUFMGR_MB_HIGH_WATER
, 0x0000,
13242 0x00000000, 0x000001ff },
13243 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
13244 0xffffffff, 0x00000000 },
13245 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
13246 0xffffffff, 0x00000000 },
13248 /* Mailbox Registers */
13249 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
13250 0x00000000, 0x000001ff },
13251 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
13252 0x00000000, 0x000001ff },
13253 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
13254 0x00000000, 0x000007ff },
13255 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
13256 0x00000000, 0x000001ff },
13258 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13261 is_5705
= is_5750
= 0;
13262 if (tg3_flag(tp
, 5705_PLUS
)) {
13264 if (tg3_flag(tp
, 5750_PLUS
))
13268 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
13269 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
13272 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
13275 if (tg3_flag(tp
, IS_5788
) &&
13276 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
13279 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
13282 offset
= (u32
) reg_tbl
[i
].offset
;
13283 read_mask
= reg_tbl
[i
].read_mask
;
13284 write_mask
= reg_tbl
[i
].write_mask
;
13286 /* Save the original register content */
13287 save_val
= tr32(offset
);
13289 /* Determine the read-only value. */
13290 read_val
= save_val
& read_mask
;
13292 /* Write zero to the register, then make sure the read-only bits
13293 * are not changed and the read/write bits are all zeros.
13297 val
= tr32(offset
);
13299 /* Test the read-only and read/write bits. */
13300 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
13303 /* Write ones to all the bits defined by RdMask and WrMask, then
13304 * make sure the read-only bits are not changed and the
13305 * read/write bits are all ones.
13307 tw32(offset
, read_mask
| write_mask
);
13309 val
= tr32(offset
);
13311 /* Test the read-only bits. */
13312 if ((val
& read_mask
) != read_val
)
13315 /* Test the read/write bits. */
13316 if ((val
& write_mask
) != write_mask
)
13319 tw32(offset
, save_val
);
13325 if (netif_msg_hw(tp
))
13326 netdev_err(tp
->dev
,
13327 "Register test failed at offset %x\n", offset
);
13328 tw32(offset
, save_val
);
13332 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
13334 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13338 for (i
= 0; i
< ARRAY_SIZE(test_pattern
); i
++) {
13339 for (j
= 0; j
< len
; j
+= 4) {
13342 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
13343 tg3_read_mem(tp
, offset
+ j
, &val
);
13344 if (val
!= test_pattern
[i
])
13351 static int tg3_test_memory(struct tg3
*tp
)
13353 static struct mem_entry
{
13356 } mem_tbl_570x
[] = {
13357 { 0x00000000, 0x00b50},
13358 { 0x00002000, 0x1c000},
13359 { 0xffffffff, 0x00000}
13360 }, mem_tbl_5705
[] = {
13361 { 0x00000100, 0x0000c},
13362 { 0x00000200, 0x00008},
13363 { 0x00004000, 0x00800},
13364 { 0x00006000, 0x01000},
13365 { 0x00008000, 0x02000},
13366 { 0x00010000, 0x0e000},
13367 { 0xffffffff, 0x00000}
13368 }, mem_tbl_5755
[] = {
13369 { 0x00000200, 0x00008},
13370 { 0x00004000, 0x00800},
13371 { 0x00006000, 0x00800},
13372 { 0x00008000, 0x02000},
13373 { 0x00010000, 0x0c000},
13374 { 0xffffffff, 0x00000}
13375 }, mem_tbl_5906
[] = {
13376 { 0x00000200, 0x00008},
13377 { 0x00004000, 0x00400},
13378 { 0x00006000, 0x00400},
13379 { 0x00008000, 0x01000},
13380 { 0x00010000, 0x01000},
13381 { 0xffffffff, 0x00000}
13382 }, mem_tbl_5717
[] = {
13383 { 0x00000200, 0x00008},
13384 { 0x00010000, 0x0a000},
13385 { 0x00020000, 0x13c00},
13386 { 0xffffffff, 0x00000}
13387 }, mem_tbl_57765
[] = {
13388 { 0x00000200, 0x00008},
13389 { 0x00004000, 0x00800},
13390 { 0x00006000, 0x09800},
13391 { 0x00010000, 0x0a000},
13392 { 0xffffffff, 0x00000}
13394 struct mem_entry
*mem_tbl
;
13398 if (tg3_flag(tp
, 5717_PLUS
))
13399 mem_tbl
= mem_tbl_5717
;
13400 else if (tg3_flag(tp
, 57765_CLASS
) ||
13401 tg3_asic_rev(tp
) == ASIC_REV_5762
)
13402 mem_tbl
= mem_tbl_57765
;
13403 else if (tg3_flag(tp
, 5755_PLUS
))
13404 mem_tbl
= mem_tbl_5755
;
13405 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
13406 mem_tbl
= mem_tbl_5906
;
13407 else if (tg3_flag(tp
, 5705_PLUS
))
13408 mem_tbl
= mem_tbl_5705
;
13410 mem_tbl
= mem_tbl_570x
;
13412 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
13413 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
13421 #define TG3_TSO_MSS 500
13423 #define TG3_TSO_IP_HDR_LEN 20
13424 #define TG3_TSO_TCP_HDR_LEN 20
13425 #define TG3_TSO_TCP_OPT_LEN 12
13427 static const u8 tg3_tso_header
[] = {
13429 0x45, 0x00, 0x00, 0x00,
13430 0x00, 0x00, 0x40, 0x00,
13431 0x40, 0x06, 0x00, 0x00,
13432 0x0a, 0x00, 0x00, 0x01,
13433 0x0a, 0x00, 0x00, 0x02,
13434 0x0d, 0x00, 0xe0, 0x00,
13435 0x00, 0x00, 0x01, 0x00,
13436 0x00, 0x00, 0x02, 0x00,
13437 0x80, 0x10, 0x10, 0x00,
13438 0x14, 0x09, 0x00, 0x00,
13439 0x01, 0x01, 0x08, 0x0a,
13440 0x11, 0x11, 0x11, 0x11,
13441 0x11, 0x11, 0x11, 0x11,
13444 static int tg3_run_loopback(struct tg3
*tp
, u32 pktsz
, bool tso_loopback
)
13446 u32 rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
13447 u32 base_flags
= 0, mss
= 0, desc_idx
, coal_now
, data_off
, val
;
13449 struct sk_buff
*skb
;
13450 u8
*tx_data
, *rx_data
;
13452 int num_pkts
, tx_len
, rx_len
, i
, err
;
13453 struct tg3_rx_buffer_desc
*desc
;
13454 struct tg3_napi
*tnapi
, *rnapi
;
13455 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
13457 tnapi
= &tp
->napi
[0];
13458 rnapi
= &tp
->napi
[0];
13459 if (tp
->irq_cnt
> 1) {
13460 if (tg3_flag(tp
, ENABLE_RSS
))
13461 rnapi
= &tp
->napi
[1];
13462 if (tg3_flag(tp
, ENABLE_TSS
))
13463 tnapi
= &tp
->napi
[1];
13465 coal_now
= tnapi
->coal_now
| rnapi
->coal_now
;
13470 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
13474 tx_data
= skb_put(skb
, tx_len
);
13475 memcpy(tx_data
, tp
->dev
->dev_addr
, ETH_ALEN
);
13476 memset(tx_data
+ ETH_ALEN
, 0x0, 8);
13478 tw32(MAC_RX_MTU_SIZE
, tx_len
+ ETH_FCS_LEN
);
13480 if (tso_loopback
) {
13481 struct iphdr
*iph
= (struct iphdr
*)&tx_data
[ETH_HLEN
];
13483 u32 hdr_len
= TG3_TSO_IP_HDR_LEN
+ TG3_TSO_TCP_HDR_LEN
+
13484 TG3_TSO_TCP_OPT_LEN
;
13486 memcpy(tx_data
+ ETH_ALEN
* 2, tg3_tso_header
,
13487 sizeof(tg3_tso_header
));
13490 val
= tx_len
- ETH_ALEN
* 2 - sizeof(tg3_tso_header
);
13491 num_pkts
= DIV_ROUND_UP(val
, TG3_TSO_MSS
);
13493 /* Set the total length field in the IP header */
13494 iph
->tot_len
= htons((u16
)(mss
+ hdr_len
));
13496 base_flags
= (TXD_FLAG_CPU_PRE_DMA
|
13497 TXD_FLAG_CPU_POST_DMA
);
13499 if (tg3_flag(tp
, HW_TSO_1
) ||
13500 tg3_flag(tp
, HW_TSO_2
) ||
13501 tg3_flag(tp
, HW_TSO_3
)) {
13503 val
= ETH_HLEN
+ TG3_TSO_IP_HDR_LEN
;
13504 th
= (struct tcphdr
*)&tx_data
[val
];
13507 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
13509 if (tg3_flag(tp
, HW_TSO_3
)) {
13510 mss
|= (hdr_len
& 0xc) << 12;
13511 if (hdr_len
& 0x10)
13512 base_flags
|= 0x00000010;
13513 base_flags
|= (hdr_len
& 0x3e0) << 5;
13514 } else if (tg3_flag(tp
, HW_TSO_2
))
13515 mss
|= hdr_len
<< 9;
13516 else if (tg3_flag(tp
, HW_TSO_1
) ||
13517 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
13518 mss
|= (TG3_TSO_TCP_OPT_LEN
<< 9);
13520 base_flags
|= (TG3_TSO_TCP_OPT_LEN
<< 10);
13523 data_off
= ETH_ALEN
* 2 + sizeof(tg3_tso_header
);
13526 data_off
= ETH_HLEN
;
13528 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
13529 tx_len
> VLAN_ETH_FRAME_LEN
)
13530 base_flags
|= TXD_FLAG_JMB_PKT
;
13533 for (i
= data_off
; i
< tx_len
; i
++)
13534 tx_data
[i
] = (u8
) (i
& 0xff);
13536 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
13537 if (pci_dma_mapping_error(tp
->pdev
, map
)) {
13538 dev_kfree_skb(skb
);
13542 val
= tnapi
->tx_prod
;
13543 tnapi
->tx_buffers
[val
].skb
= skb
;
13544 dma_unmap_addr_set(&tnapi
->tx_buffers
[val
], mapping
, map
);
13546 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
13551 rx_start_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
13553 budget
= tg3_tx_avail(tnapi
);
13554 if (tg3_tx_frag_set(tnapi
, &val
, &budget
, map
, tx_len
,
13555 base_flags
| TXD_FLAG_END
, mss
, 0)) {
13556 tnapi
->tx_buffers
[val
].skb
= NULL
;
13557 dev_kfree_skb(skb
);
13563 /* Sync BD data before updating mailbox */
13566 tw32_tx_mbox(tnapi
->prodmbox
, tnapi
->tx_prod
);
13567 tr32_mailbox(tnapi
->prodmbox
);
13571 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13572 for (i
= 0; i
< 35; i
++) {
13573 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
13578 tx_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
13579 rx_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
13580 if ((tx_idx
== tnapi
->tx_prod
) &&
13581 (rx_idx
== (rx_start_idx
+ num_pkts
)))
13585 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
- 1, -1);
13586 dev_kfree_skb(skb
);
13588 if (tx_idx
!= tnapi
->tx_prod
)
13591 if (rx_idx
!= rx_start_idx
+ num_pkts
)
13595 while (rx_idx
!= rx_start_idx
) {
13596 desc
= &rnapi
->rx_rcb
[rx_start_idx
++];
13597 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
13598 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
13600 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
13601 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
13604 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
)
13607 if (!tso_loopback
) {
13608 if (rx_len
!= tx_len
)
13611 if (pktsz
<= TG3_RX_STD_DMA_SZ
- ETH_FCS_LEN
) {
13612 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
13615 if (opaque_key
!= RXD_OPAQUE_RING_JUMBO
)
13618 } else if ((desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
13619 (desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
13620 >> RXD_TCPCSUM_SHIFT
!= 0xffff) {
13624 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
13625 rx_data
= tpr
->rx_std_buffers
[desc_idx
].data
;
13626 map
= dma_unmap_addr(&tpr
->rx_std_buffers
[desc_idx
],
13628 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
13629 rx_data
= tpr
->rx_jmb_buffers
[desc_idx
].data
;
13630 map
= dma_unmap_addr(&tpr
->rx_jmb_buffers
[desc_idx
],
13635 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
,
13636 PCI_DMA_FROMDEVICE
);
13638 rx_data
+= TG3_RX_OFFSET(tp
);
13639 for (i
= data_off
; i
< rx_len
; i
++, val
++) {
13640 if (*(rx_data
+ i
) != (u8
) (val
& 0xff))
13647 /* tg3_free_rings will unmap and free the rx_data */
13652 #define TG3_STD_LOOPBACK_FAILED 1
13653 #define TG3_JMB_LOOPBACK_FAILED 2
13654 #define TG3_TSO_LOOPBACK_FAILED 4
13655 #define TG3_LOOPBACK_FAILED \
13656 (TG3_STD_LOOPBACK_FAILED | \
13657 TG3_JMB_LOOPBACK_FAILED | \
13658 TG3_TSO_LOOPBACK_FAILED)
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);
}
static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
		tg3_flag_set(tp, TX_TSTAMP_EN);
	else
		tg3_flag_clear(tp, TX_TSTAMP_EN);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
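/* Minimal userspace sketch of driving the handler above via the standard
 * SIOCSHWTSTAMP ioctl (fd is assumed to be any open socket and "eth0" is
 * an illustrative interface name; see the kernel's timestamping
 * documentation for the full API):
 *
 *	struct ifreq ifr = {0};
 *	struct hwtstamp_config cfg = {0};
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return the driver writes back the configuration it actually applied
 * through the copy_to_user() above.
 */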
static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	stmpconf.flags = 0;
	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

	switch (tp->rxptpctl) {
	case 0:
		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fall through */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
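/* These are the limits that "ethtool -C" runs into.  For example
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 * arrives here through the ETHTOOL_SCOALESCE path as
 * ec->rx_coalesce_usecs = 20 and ec->rx_max_coalesced_frames = 5
 * (the interface name is only an example).
 */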
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}

static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
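/* Example flow: "ip link set eth0 mtu 9000" reaches tg3_change_mtu() via
 * the .ndo_change_mtu hook below; with the device up this stops the NIC,
 * flips JUMBO_RING_ENABLE in tg3_set_mtu(), and restarts the hardware
 * (the interface name is illustrative).
 */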
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
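/* Worked example of the wrap-around probe above: on a 16 KB part, reads
 * at 0x10, 0x20, 0x40, ... return ordinary data until cursize reaches
 * 0x4000; there the address wraps back to offset 0, the read returns the
 * magic signature again, and tp->nvram_size ends up as 0x4000.
 */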
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
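/* Worked example of the swab16() above: if the size word at 0xf2 holds
 * 512 (0x0200) in NVRAM's little-endian layout, the byteswapped read
 * leaves (val & 0xffff) == 0x0002, and swab16(0x0002) == 0x0200 == 512,
 * giving 512 * 1024 bytes for tp->nvram_size.
 */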
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
						AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;

		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
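/* Usage note: tg3_phy_probe() later in this file falls back to this table
 * when the PHY ID registers cannot be read and no EEPROM signature was
 * found; e.g. a board with a 3COM/3C996T subsystem ID resolves to
 * TG3_PHY_ID_BCM5401, while a NULL return means the board is unknown.
 */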
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
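/* Note on the eeprom_phy_id packing above: the SRAM word holds both MII ID
 * registers, and the shifts rebuild the same 32-bit value that
 * tg3_phy_probe() below assembles from MII_PHYSID1/MII_PHYSID2, i.e.
 *	phy_id  = (physid1 & 0xffff) << 10;
 *	phy_id |= (physid2 & 0xfc00) << 16;
 *	phy_id |= (physid2 & 0x03ff);
 * so both sources can be compared against the TG3_PHY_ID_* constants.
 */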
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
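/* Worked example of the merge above: if the two OTP words read back as
 * thalf_otp = 0x1234abcd and bhalf_otp = 0x5678ef01, the gphy config is
 *	((0x1234abcd & 0x0000ffff) << 16) | (0x5678ef01 >> 16) == 0xabcd5678
 * (sample values only; real contents come from the OTP array).
 */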
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xffffffff) != 0)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
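
/* Illustrative note (added commentary, not from the original driver):
 * a non-zero build number is appended as a single letter, 'a' for
 * build 1 up through 'z' for build 26 (hence the "build > 26" sanity
 * check above).  A value of major=1, minor=2, build=3 would therefore
 * render as "sb v1.02c".
 */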
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;

			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
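
/* Illustrative note (added commentary, not from the original driver):
 * tp->fw_ver is built up incrementally: a VPD-derived prefix (if any),
 * then the bootcode version from one of the three readers above, then
 * optional management-firmware suffixes (NCSI/DASH/SMASH or ASF), and
 * it is finally NUL-terminated defensively at TG3_VER_SIZE - 1.
 */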
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
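
/* Illustrative note (added commentary, not from the original driver):
 * devfn packs the PCI device number in the upper bits and the function
 * number in the low three bits, so "tp->pdev->devfn & ~7" yields the
 * devfn of function 0 in the same slot and "devnr | func" walks all
 * eight possible functions of that slot looking for the sibling port.
 */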
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
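
/* Illustrative note (added commentary, not from the original driver):
 * the generation flags set above nest: every 57765_PLUS chip is also
 * 5755_PLUS, every 5755_PLUS chip is also 5750_PLUS, and every
 * 5750_PLUS chip is also 5705_PLUS, with 5717_PLUS, 57765_CLASS and
 * 5780_CLASS as side branches.  Later code can therefore test a single
 * flag instead of enumerating ASIC revisions.
 */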
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);
	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);
	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing.  HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;
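
	/* Illustrative note (added commentary, not from the original
	 * driver): TSO support comes in tiers.  HW_TSO_3/2/1 mark
	 * hardware segmentation engines of decreasing capability, while
	 * FW_TSO offloads segmentation to chip firmware loaded from
	 * FIRMWARE_TG3TSO(5).  TSO_BUG marks chips whose offload needs
	 * software workarounds, and TSO_CAPABLE is the summary flag the
	 * fast path actually consults.
	 */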
	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
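
	/* Illustrative note (added commentary, not from the original
	 * driver): tp->read32/write32 and the mailbox variants form a
	 * small dispatch table.  The fast path uses plain MMIO
	 * (tg3_read32/tg3_write32), while the assignments above swap in
	 * flushing or config-space-indirect accessors only on the chips
	 * and host chipsets whose errata require them.
	 */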
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}
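
	/* Illustrative note (added commentary, not from the original
	 * driver): tp->pci_fn must reflect the chip-internal port
	 * number, which on multi-port parts does not always match
	 * PCI_FUNC() of the PCI core: PCI-X parts report it in
	 * PCI_X_STATUS, while 5717/5719/5720 expose it through the CPMU
	 * status register, masked and shifted per ASIC as above.
	 */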
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
		tp->ape_hb_interval =
			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;
	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;
	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;
	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == ETH_ALEN) {
		memcpy(dev->dev_addr, addr, ETH_ALEN);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
	return 0;
}
#endif
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
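
/* Illustrative note (added commentary, not from the original driver):
 * the SRAM mailbox encodes the MAC address as two big-endian words
 * with a 0x484b ("HK") signature in the top half of the high word.
 * For a hypothetical address 00:10:18:aa:bb:cc the mailbox would hold
 * hi = 0x484b0010 and lo = 0x18aabbcc, matching the shifts above.
 */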
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
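
/* Illustrative note (added commentary, not from the original driver):
 * the boundary chosen here tells the chip's DMA engine where it must
 * break up bursts.  BOUNDARY_SINGLE_CACHELINE asks for breaks at every
 * cache line (e.g. a 64-byte line selects the _64 read/write boundary
 * bits); BOUNDARY_MULTI_CACHELINE allows larger bursts where the host
 * bridge tolerates them.  On 5703-and-later non-PCIe parts the bits
 * are inert, which is why the function bails out early for them.
 */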
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
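
/* Illustrative note (added commentary, not from the original driver):
 * tg3_do_test_dma() drives one descriptor through the read- or
 * write-DMA FTQ by hand and polls the completion FIFO for it.  A
 * typical round trip, as tg3_test_dma() below performs, is:
 *
 *	ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
 *	if (!ret)
 *		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE,
 *				      false);
 *
 * i.e. host memory -> chip, then chip -> host memory, after which the
 * caller compares the buffer contents.
 */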
#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
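
/* Illustrative note (added commentary, not from the original driver):
 * the mbuf watermarks above are chip-family defaults for the on-chip
 * buffer manager: the "low water" values throttle RX DMA when free
 * mbuf space runs short, the "high water" values set the refill
 * target, and the jumbo variants apply when jumbo rings are enabled.
 */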
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
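
/* These defaults are what ETHTOOL_GCOALESCE reports to user space.
 * The per-IRQ tick/frame limits and the statistics-block tick are
 * cleared for 5705_PLUS parts above because that class of chip does
 * not support those host-coalescing parameters.
 */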

static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}
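
	/* An SSB GigE core is this same MAC embedded in a Broadcom SoC
	 * and enumerated through the ssb bus rather than as a regular
	 * PCI function; the flags set above adapt posted-write flushing,
	 * DMA serialization and PHY/switch handling to that environment.
	 */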

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
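
	/* persist_dma_mask bounds coherent allocations (descriptor rings,
	 * status block) while dma_mask bounds streaming packet mappings;
	 * with CONFIG_HIGHMEM the streaming mask may exceed the 40-bit
	 * persistent limit because tg3_start_xmit() checks and works
	 * around buffers the DMA engine cannot reach.
	 */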

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
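
	/* 'features' now holds only the offloads this chip can perform
	 * safely; below they are exposed both as active defaults
	 * (dev->features) and as user-toggleable bits (dev->hw_features).
	 */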

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
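
	/* With MSI-X enabled, vector 0 services link events only and
	 * vectors 1..n service the rx/tx rings, so the mailbox offsets
	 * computed above stride across the per-vector mailbox space.
	 */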

	/*
	 * Reset chip in case UNDI or EFI driver did not shut down cleanly.
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);

		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}
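
/* The pci_error_handlers callbacks below implement PCI error recovery
 * (AER/EEH): error_detected() quiesces the device while the link is
 * unreliable, slot_reset() re-enables it after the bus reset, and
 * resume() restarts traffic once recovery has succeeded.
 */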

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* Could be second call or maybe we don't have netdev yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};
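
/* module_pci_driver() below expands to the module_init()/module_exit()
 * pair that registers tg3_driver with the PCI core on module load and
 * unregisters it on unload.
 */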

module_pci_driver(tg3_driver);