/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
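/* Illustrative expansion (added note, not in the original source): the
 * token-pasting wrappers map a short flag name onto its enum TG3_FLAGS
 * constant, e.g.
 *
 *	tg3_flag(tp, JUMBO_CAPABLE)
 *		-> _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags)
 *
 * so every flag access remains a plain test_bit()/set_bit()/clear_bit()
 * on the tg3_flags bitmap while staying type-checked against the enum.
 */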
#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
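/* Worked example (added note): because TG3_TX_RING_SIZE is a compile-time
 * power of two, NEXT_TX(511) == (512 & 0x1ff) == 0 -- the producer index
 * wraps with a single AND, with no hardware modulo emitted, which is
 * exactly why the comment above keeps these as constants.
 */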
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
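/* Note (added, illustrative): MODULE_DEVICE_TABLE(pci, ...) exports the
 * table above as module alias data, so userspace (udev/modprobe) can
 * autoload tg3 when a matching PCI ID such as 14e4:1644 (BCM5700) is
 * enumerated on the bus.
 */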
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
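/* Note (added, illustrative): the *_indirect_* accessors above never
 * touch the MMIO BAR.  They program a register window through PCI config
 * space instead -- write the target offset to TG3PCI_REG_BASE_ADDR, then
 * move the data through TG3PCI_REG_DATA -- which is why each access pairs
 * two config-space transactions under indirect_lock.
 */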
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
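/* Note (added, illustrative): these macros deliberately capture a local
 * variable named "tp" from the calling function, so tw32(MAC_MODE, val)
 * only compiles where a struct tg3 *tp is in scope.  Routing everything
 * through tp->write32/tp->read32 lets probe-time code select the direct,
 * flushing, or indirect accessor once per chip quirk instead of branching
 * on every register access.
 */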
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
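/* Usage sketch (added, hypothetical caller): the request/grant register
 * pair acts as a hardware semaphore shared with the APE management
 * firmware:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;		// our grant bit never appeared
 *	...access APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */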
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if the heartbeat interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}
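/* Note (added, illustrative): time_before() does wrap-safe jiffies
 * arithmetic, so the heartbeat throttle above stays correct across a
 * jiffies overflow instead of stalling until the counter catches up.
 */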
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
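/* Illustrative MI frame layout (added note, derived from the helpers
 * above): __tg3_readphy/__tg3_writephy pack one MDIO transaction into
 * MAC_MI_COM -- PHY address and register shifted into their
 * MI_COM_*_SHIFT fields, data masked by MI_COM_DATA_MASK, plus
 * MI_COM_CMD_READ or MI_COM_CMD_WRITE ORed with MI_COM_START -- then
 * poll MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations.
 */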
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
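/* Note (added, illustrative): the DSP registers are reached indirectly --
 * write the target register number to MII_TG3_DSP_ADDRESS, then move the
 * value through MII_TG3_DSP_RW_PORT -- so each tg3_phydsp_read/write is a
 * two-step MII sequence that callers must not interleave.
 */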
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}
static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
*tp
, int reg
, u32 val
)
1345 return tg3_writephy(tp
, MII_TG3_MISC_SHDW
,
1346 reg
| val
| MII_TG3_MISC_SHDW_WREN
);
1349 static int tg3_bmcr_reset(struct tg3
*tp
)
1354 /* OK, reset it, and poll the BMCR_RESET bit until it
1355 * clears or we time out.
1357 phy_control
= BMCR_RESET
;
1358 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
1364 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
1368 if ((phy_control
& BMCR_RESET
) == 0) {
1380 static int tg3_mdio_read(struct mii_bus
*bp
, int mii_id
, int reg
)
1382 struct tg3
*tp
= bp
->priv
;
1385 spin_lock_bh(&tp
->lock
);
1387 if (__tg3_readphy(tp
, mii_id
, reg
, &val
))
1390 spin_unlock_bh(&tp
->lock
);
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
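/* Note (added, illustrative): the link update handshake above writes a
 * command word, a length, and four data words gathered from the PHY
 * (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000, PHYADDR) into NIC SRAM
 * mailboxes, then rings the ASF firmware with tg3_generate_fw_event().
 */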
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			return -ENODEV;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
tg3_decode_flowctrl_1000X(u32 adv
)
1939 if (adv
& ADVERTISE_1000XPAUSE
) {
1940 flowctrl
|= FLOW_CTRL_RX
;
1941 if (!(adv
& ADVERTISE_1000XPSE_ASYM
))
1942 flowctrl
|= FLOW_CTRL_TX
;
1943 } else if (adv
& ADVERTISE_1000XPSE_ASYM
)
1944 flowctrl
|= FLOW_CTRL_TX
;
1949 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
1953 if (lcladv
& rmtadv
& ADVERTISE_1000XPAUSE
) {
1954 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1955 } else if (lcladv
& rmtadv
& ADVERTISE_1000XPSE_ASYM
) {
1956 if (lcladv
& ADVERTISE_1000XPAUSE
)
1958 if (rmtadv
& ADVERTISE_1000XPAUSE
)
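/* Worked example (added note, following 802.3x pause resolution): both
 * ends advertising symmetric 1000XPAUSE yields TX|RX.  Otherwise, with
 * both ends advertising the asymmetric bit, a local symmetric-PAUSE
 * advertisement resolves to FLOW_CTRL_RX (we honor received pause) and a
 * remote one resolves to FLOW_CTRL_TX (we may transmit pause frames).
 */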
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}
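/* Note (added, illustrative): tg3 only attaches to phylib like this on
 * chips flagged USE_PHYLIB (e.g. the 5785 family); phy_connect() binds
 * tg3_adjust_link() as the link-change callback that reconciles MAC_MODE
 * with whatever the external PHY negotiated.
 */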
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}
static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return 0;
}
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
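/* Editor's note: a minimal stand-alone sketch (not part of the driver)
 * of the arithmetic these masks rely on.  Each of the four PCI
 * functions owns a 4-bit field in the shared status word, so function
 * N's flags live at bit offset 4 * N.  The macro names are the
 * driver's; everything inside the #if 0 block is illustrative only.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int status = 0;
	int fn;

	/* Mark functions 0 and 2 as present and needing Vaux. */
	status |= (TG3_GPIO_MSG_DRVR_PRES | TG3_GPIO_MSG_NEED_VAUX) << (4 * 0);
	status |= (TG3_GPIO_MSG_DRVR_PRES | TG3_GPIO_MSG_NEED_VAUX) << (4 * 2);

	/* Decode each function's field the same way the driver does. */
	for (fn = 0; fn < 4; fn++)
		printf("fn%d flags: %#x\n", fn,
		       (status >> (4 * fn)) & TG3_GPIO_MSG_MASK);

	return 0;
}
#endif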
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static bool tg3_phy_led_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
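/* Editor's note: the routine above is the driver's standard bounded
 * poll: issue a command, then re-read a DONE bit with a short sleep
 * per iteration (10000 iterations at 10-40 us each, so roughly
 * 0.1-0.4 s worst case).  A generic stand-alone sketch of the same
 * shape; reg_read() and done_bit are hypothetical placeholders:
 */
#if 0
static int poll_done(unsigned int (*reg_read)(void),
		     unsigned int done_bit, int iters)
{
	int i;

	for (i = 0; i < iters; i++) {
		/* A real driver would sleep here between reads. */
		if (reg_read() & done_bit)
			return 0;	/* completed */
	}
	return -1;			/* timed out */
}
#endif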
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
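/* Editor's note: a worked example of the two translations above,
 * assuming the 264-byte AT45DB0x1B page geometry that the driver
 * pairs with ATMEL_AT45DB0X1B_PAGE_POS (figures are illustrative):
 *
 *   linear addr 1000 -> page = 1000 / 264 = 3, offset = 1000 % 264 = 208
 *   physical  addr   = (3 << PAGE_POS) + 208
 *
 * tg3_nvram_logical_addr() inverts this: the page index comes back
 * from addr >> PAGE_POS, the offset from the low PAGE_POS bits, and
 * the linear address is page * 264 + offset = 1000 again.
 */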
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);

	if (res == 0)
		*val = cpu_to_be32(v);
	return res;
}
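/* Editor's note: a small stand-alone illustration of why the helper
 * above exists.  tg3_nvram_read() returns a CPU-order u32, so on a
 * little-endian host cpu_to_be32() reorders it into the bytestream
 * order the NVRAM contents are defined in.  Outside the kernel,
 * htonl() performs the same conversion:
 */
#if 0
#include <stdio.h>
#include <arpa/inet.h>	/* htonl(): host order -> big endian */

int main(void)
{
	unsigned int v = 0x12345678;
	unsigned int be = htonl(v);
	unsigned char *p = (unsigned char *)&be;

	/* Prints 12 34 56 78 regardless of the host byte order. */
	printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}
#endif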
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
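/* Editor's note: the function above is a classic flash page
 * read-modify-write.  With an illustrative 256-byte page and a write
 * landing at offset 260:
 *
 *   phy_addr = 260 & ~255 = 256   (page base to read back and erase)
 *   page_off = 260 &  255 =   4   (where the new bytes land in tmp[])
 *
 * The whole page is read into tmp[], the caller's bytes are memcpy'd
 * over tmp[page_off..], the page is erased, and tmp[] is written back
 * four bytes at a time with NVRAM_CMD_FIRST/_LAST bracketing the burst.
 */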
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written.  The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss.  The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments.  Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data.  In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length.  Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
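/* Editor's note: a worked example of the two cases above, with
 * illustrative numbers.  Non-fragmented: a blob with tp->fw->size =
 * 2048 bytes yields (2048 - TG3_FW_HDR_LEN) / 4 data words, since the
 * header's len field only describes the bss.  Fragmented (tp->fw_len
 * == 0xffffffff): a fragment header carrying len = 512 contains
 * (512 - TG3_FW_HDR_LEN) / 4 data words, because there the len field
 * covers header plus data for that one fragment.
 */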
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);

		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);

		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
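/* Editor's note: a stand-alone sketch of the fragment walk performed
 * by the do/while loop above.  The header layout is assumed from
 * struct tg3_firmware_hdr (version/base_addr/len, big-endian on disk);
 * the blob and the walk() helper below are hypothetical:
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl() */

struct hdr { uint32_t version, base_addr, len; };	/* all BE on disk */

static void walk(const uint8_t *blob, int total_len)
{
	const struct hdr *h = (const struct hdr *)blob;

	total_len -= sizeof(*h);	/* skip the unused main header */
	h++;
	while (total_len > 0) {
		int len = ntohl(h->len);	/* header + data bytes */

		printf("fragment @%p, %d data words\n", (const void *)h,
		       (int)((len - sizeof(*h)) / 4));
		total_len -= len;
		h = (const struct hdr *)((const uint8_t *)h + len);
	}
}
#endif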
/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length.  We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop.  It is then safe to download service patches.
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below.  The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length.  The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware.  i.e. they have the
	 * firmware header and followed by data for that fragment.  The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length.  We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
{
	u32 addr_high, addr_low;

	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
		    (mac_addr[4] <<  8) | mac_addr[5]);

	if (index < 4) {
		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
	} else {
		index -= 4;
		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
	}
}
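/* Editor's note: a worked example of the register packing above.  For
 * the (illustrative) address 00:10:18:aa:bb:cc:
 *
 *   addr_high = (0x00 << 8) | 0x10              = 0x00000010
 *   addr_low  = (0x18 << 24) | (0xaa << 16) |
 *               (0xbb <<  8) |  0xcc            = 0x18aabbcc
 *
 * i.e. the first two octets land in the HIGH register and the last
 * four in the LOW register, most-significant octet first.
 */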
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high;
	int i;

	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 4; i < 16; i++)
			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_setup_phy(struct tg3 *, bool);

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
			struct phy_device *phydev;
			u32 phyid;

			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			ethtool_convert_link_mode_to_legacy_u32(
				&tp->link_config.advertising,
				phydev->advertising);

			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
					 advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
					 advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
					 advertising);

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB)) {
					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
							 advertising);
					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
							 advertising);
					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
							 advertising);
				} else {
					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
							 advertising);
				}
			}

			linkmode_copy(phydev->advertising, advertising);
			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, false);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			fallthrough;
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL.  However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			fallthrough;
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
	struct ethtool_eee eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return true;

	tg3_eee_pull_config(tp, &eee);

	if (tp->eee.eee_enabled) {
		if (tp->eee.advertised != eee.advertised ||
		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
			return false;
	} else {
		/* EEE is disabled but we're advertising */
		if (eee.advertised)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
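
/* Editor's note: a minimal model (not part of the driver) of the check
 * tg3_phy_copper_an_config_ok() makes above: the bits we intended to
 * advertise must survive in MII_ADVERTISE, with the pause bits only
 * compared when the active link is full duplex. The helper name is
 * hypothetical; the ADVERTISE_* bits come from <linux/mii.h>.
 */
static inline bool example_advert_matches(u32 lcladv, u32 tgtadv,
					  bool full_duplex)
{
	u32 advmsk = ADVERTISE_ALL;

	if (full_duplex)
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

	return (lcladv & advmsk) == tgtadv;
}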
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			netif_carrier_on(tp->dev);
		} else {
			netif_carrier_off(tp->dev);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}

static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 current_speed;
	u8 current_duplex;
	u32 lcl_adv, rmt_adv;
	u32 bmsr, val;
	int i, err;

	tg3_clear_mac_status(tp);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset. If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
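
/* Editor's note: an illustrative sketch, not part of the driver. It
 * shows how a transmit config word for 1000BASE-X autonegotiation is
 * assembled from the ANEG_CFG_* bits above, mirroring what
 * ANEG_STATE_ABILITY_DETECT_INIT does in the state machine below.
 * The helper name is hypothetical.
 */
static inline u32 example_build_txconfig(bool sym_pause, bool asym_pause)
{
	u32 txconfig = ANEG_CFG_FD;	/* always advertise full duplex */

	if (sym_pause)
		txconfig |= ANEG_CFG_PS1;
	if (asym_pause)
		txconfig |= ANEG_CFG_PS2;

	return txconfig;
}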
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		fallthrough;
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		fallthrough;
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		fallthrough;
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u32 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u32 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	    !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	    (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
{
	u64 stamp;

	ptp_read_system_prets(sts);
	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	ptp_read_system_postts(sts);
	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;

	return stamp;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
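
/* Editor's note (not part of the driver): tg3_refclk_read() composes a
 * 64-bit timestamp from two 32-bit MMIO reads, LSB first. That order is
 * only safe if the hardware snapshots the high half when the low half
 * is read; the read sequence above suggests that design, but it is an
 * assumption here. A hypothetical model of the join:
 */
static inline u64 example_join_split_counter(u32 lsb, u32 msb)
{
	return ((u64)msb << 32) | lsb;
}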
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
	}

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
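
/* Editor's note, a worked example of the conversion above (not part of
 * the driver): a request of ppb = 1000000 (+1000 ppm) yields
 *
 *	correction = 1000000 * 2^24 / 10^9 = 16777
 *
 * With 16777 added to the 24-bit accumulator on every clock, it
 * overflows roughly once per 1000 clocks, giving the time counter about
 * 1000 extra increments per million clocks, i.e. the requested +1000 ppm.
 */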
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			    struct ptp_system_timestamp *sts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp, sts);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec64_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		/* Reject requests with unsupported flags */
		if (rq->perout.flags)
			return -EOPNOTSUPP;

		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettimex64	= tg3_ptp_gettimex,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};

static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					   tp->ptp_adjust);
}
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
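
/* Editor's note, a worked example of the ring arithmetic above (not
 * part of the driver): with a 512-entry ring, tx_prod = 5 and
 * tx_cons = 510 (the producer has wrapped), (5 - 510) & 511 = 7
 * descriptors are outstanding, so tx_pending - 7 slots remain. The
 * mask makes the unsigned subtraction immune to wraparound, which is
 * why the ring size must be a power of two.
 */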
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_consume_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
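
/* Editor's note (not part of the driver): the smp_mb() in tg3_tx()
 * above pairs with the mirror-image sequence on the transmit side,
 * which stops the queue, issues its own barrier, and then re-checks
 * tg3_tx_avail(). Each party publishes its index, orders the write
 * before its re-read of the other side's state, and the double-checked
 * wake under the tx lock closes the remaining window. This is the
 * standard lock-free producer/consumer wakeup pattern, noted here as a
 * reading aid.
 */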
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = napi_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
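
/* Editor's note, a reading aid for the size math above (not part of
 * the driver): the allocation covers the RX offset, the aligned data
 * area, and the skb_shared_info footer that build_skb() expects. When
 * the total fits in one page the cheap napi_alloc_frag() path is used
 * and *frag_size records the true allocation size; larger (e.g. jumbo)
 * buffers fall back to kmalloc() and signal that with *frag_size = 0,
 * which is why tg3_frag_free() takes an is_frag flag. Exact byte
 * counts depend on the architecture's alignment, so none are given
 * here.
 */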
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
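/* Worked example (illustrative, not from the original source): with a ring
 * mask of 511, cons_idx = 500 and prod_idx = 10, the producer has wrapped,
 * so cpycnt = 511 + 1 - 500 = 12 entries are copied up to the ring end
 * first; the next pass of the while loop then copies the remaining 10.
 */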
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
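/* Editor's note: under RSS only tp->napi[1] performs the transfer above; it
 * drains every per-vector producer ring into the single hardware-visible
 * ring on vector 0 and only then updates the hardware mailboxes, so the
 * chip never observes a partially copied ring.
 */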
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
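/* Editor's note: writing (last_tag << 24) to the interrupt mailbox both
 * re-enables the vector and acknowledges status updates up to that tag,
 * which is why last_tag must be latched before the final "more work"
 * check above.
 */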
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
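/* Usage sketch (illustrative, not from the original source): configuration
 * paths that must also fence out the ISRs pass a non-zero irq_sync:
 *
 *	tg3_full_lock(tp, 1);
 *	... reconfigure or halt the hardware ...
 *	tg3_full_unlock(tp);
 *
 * Fast paths that only need mutual exclusion against other writers pass 0
 * and skip the expensive synchronize_irq() pass.
 */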
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}

/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}

static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
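/* Worked example (illustrative, not from the original source): for
 * tg3_4g_overflow_test() with mapping = 0xfffff000 and len = 0x2000,
 * base = 0xfffff000 and base + len + 8 wraps to 0x1008 in 32-bit
 * arithmetic, which is smaller than base, so the buffer is flagged as
 * crossing a 4GB boundary and must take the workaround path.
 */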
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
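/* Worked example (illustrative): with tp->dma_limit = 4096 and len = 4100,
 * a naive split would leave a 4-byte tail and trip the 8-byte DMA bug, so
 * the loop above instead emits a half-sized 2048-byte BD, leaving a safe
 * 2052-byte remainder for the final descriptor.
 */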
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
	struct sk_buff *segs, *seg, *next;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);
		tg3_start_xmit(seg, tp->dev);
	}

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
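/* Editor's note (assumption): the "* 3" above mirrors the "/ 3" in
 * tg3_tso_bug_gso_check() -- each GSO segment is budgeted at roughly three
 * descriptors in the worst case, so a packet is only deferred to GSO when
 * the ring could plausibly hold all of its segments.
 */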
/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u32 tcp_opt_len, hdr_len;

		if (skb_cow_head(skb, 0))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		/* HW/FW can not correctly segment packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (tg3_tso_bug_gso_check(tnapi, skb))
				return tg3_tso_bug(tp, tnapi, txq, skb);
			goto drop;
		}

		if (!skb_is_gso_v6(skb)) {
			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			    tg3_flag(tp, TSO_BUG)) {
				if (tg3_tso_bug_gso_check(tnapi, skb))
					return tg3_tso_bug(tp, tnapi, txq, skb);
				goto drop;
			}
			ip_csum = iph->check;
			ip_tot_len = iph->tot_len;
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcph = tcp_hdr(skb);
		tcp_csum = tcph->check;

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcph->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else {
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		}

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW can not correctly checksum packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (skb_checksum_help(skb))
				goto drop;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (skb_vlan_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = skb_vlan_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card. */
		tw32_tx_mbox(tnapi->prodmbox, entry);
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
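/* Editor's note: the queue stop/wake dance in tg3_start_xmit() pairs with
 * tg3_tx(): the producer stops the queue, issues smp_mb(), then re-checks
 * ring space, while the consumer updates the ring before testing
 * netif_tx_queue_stopped(), so a wakeup cannot be lost between the two.
 */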
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_consume_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* tp->hw_stats can be referenced safely:
	 *     1. under rtnl_lock
	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit,
			  bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
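/* Editor's note: tg3_abort_hw() quiesces in receive-to-transmit order --
 * the RX MAC first, then the RX pipeline blocks, then the TX blocks, host
 * coalescing and the memory arbiter -- so no block is disabled while an
 * earlier stage can still feed it work.
 */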
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}
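/* Usage sketch (illustrative): the pair is used bracket-style around a chip
 * reset --
 *
 *	tg3_override_clk(tp);
 *	tw32(GRC_MISC_CFG, val);	-- core clock reset
 *	...
 *	tg3_restore_clk(tp);
 *
 * -- so the bootcode runs at full clock speed and the override is undone
 * once it has finished (see tg3_chip_reset() below).
 */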
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
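/* Layout note (informal, inferred from the accessors above): each
 * TG3_BDINFO control block in NIC SRAM holds the host DMA address of a
 * ring split into 32-bit high/low words, a packed max-length/flags word,
 * and, on pre-5705 parts, the ring's own location in NIC SRAM. A caller
 * therefore programs one ring with a single call, e.g. (arguments
 * illustrative only):
 *
 *	tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
 *		       TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *		       NIC_SRAM_TX_BUFFER_DESC);
 */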
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
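/* Example of the threshold math above (values illustrative only): with a
 * BD cache of, say, 32 descriptors and rx_pending == 200, the NIC-side
 * replenish threshold is min(32 / 2, rx_std_max_post) and the host-side
 * threshold is max(200 / 8, 1) == 25; the smaller of the two is what gets
 * programmed into RCVBDI_STD_THRESH.
 */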
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}
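/* calc_crc() is the standard bit-reflected CRC-32 (polynomial
 * CRC32_POLY_LE, i.e. 0xedb88320), processing one byte per outer
 * iteration and one bit per inner iteration: fold the next byte into the
 * register, then for each of eight bits shift right and conditionally
 * xor in the polynomial when the bit that fell off was set, with a final
 * inversion of the register. Only a handful of low-order result bits are
 * consumed by the multicast hash filter below.
 */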
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
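/* The multicast filter above is a 128-bit hash: bit = ~crc & 0x7f picks
 * seven bits of the address CRC, the top two of those bits
 * ((bit & 0x60) >> 5) select one of the four 32-bit MAC_HASH_REG_*
 * registers, and the low five bits (bit & 0x1f) select the bit within
 * that register. For example, a hash value of 0x43 lands in register 2
 * (0x43 >> 5), bit 3 (0x43 & 0x1f).
 */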
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
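/* Each indirection-table register packs eight 4-bit queue indices, so
 * the loop above shifts seven successive table nibbles in under the
 * first one and issues a single tw32() per group of eight entries; with
 * TG3_RSS_INDIR_TBL_SIZE == 128 that comes to 16 register writes.
 */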
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one of the AMD platform, MRRS is restricted to 4000 because of
	 * south bridge limitation. As a workaround, Driver is setting MRRS
	 * to 2048 instead of default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode |
	       MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing required */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10 ; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
	}
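	/* The RSS hash key here is 40 bytes (10 * sizeof(u32)), filled
	 * from the kernel's global random RSS key via netdev_rss_key_fill()
	 * and written one u32 per MAC_RSS_HASH_KEY_* register, so the
	 * flow-to-queue mapping differs from boot to boot.
	 */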
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_5SEC);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature * 1000);
}
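/* The APE scratchpad reports temperature in whole degrees Celsius,
 * while the hwmon sysfs convention is millidegrees, hence the "* 1000"
 * in the sprintf() above.
 */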
static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);

static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#else
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
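/* The low/high pair forms a software 64-bit counter on top of 32-bit
 * hardware registers; each poll treats the register value as a delta to
 * accumulate. Unsigned wraparound of the low word is detected by
 * "(PSTAT)->low < __val": the sum can only be smaller than the addend if
 * a carry occurred, in which case the high word is bumped.
 */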
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	/* Update the APE heartbeat every 5 seconds.*/
	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	timer_setup(&tp->timer, tg3_timer, 0);
}
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}

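/* Generate a test interrupt through a scratch ISR and poll the mailbox
 * to confirm it was delivered.  One-shot MSI is temporarily disabled
 * because it leaves no observable trace for this test; the original
 * handler and mode are restored before returning.
 */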
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}

/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}

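/* Fetch the firmware image named by tp->fw_needed and sanity-check the
 * length recorded in its header against the actual blob size before
 * the caller commits to loading it into the chip.
 */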
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}

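/* MSI-X vector accounting: in multiqueue mode the first vector only
 * handles link and other non-queue events, so one extra vector is
 * requested on top of the rx/tx queue count, capped at tp->irq_max.
 */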
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}

static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}

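/* Pick the interrupt signalling mode: MSI-X when the chip supports it
 * and enough vectors are granted, then MSI, then legacy INTx as the
 * fallback.  Queue counts are trimmed to match the mode obtained.
 */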
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}

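/* Bring the device up.  Interrupts are configured first so the right
 * number of NAPI contexts and DMA rings can be allocated; then the
 * hardware is initialized, the MSI path optionally verified, and the
 * timer, interrupts, and tx queues enabled.
 */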
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				struct tg3_napi *tnapi = &tp->napi[i];

				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];

		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}

static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];

		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}

static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	return err;
}

static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to close device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	tg3_stop(tp);

	if (pci_device_is_present(tp->pdev)) {
		tg3_power_down_prepare(tp);

		tg3_carrier_off(tp);
	}
	return 0;
}

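/* Hardware statistics are kept as split 32-bit high/low words in the
 * stats block; fold them back into a single 64-bit counter.
 */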
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}

static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}

#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}

static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}

static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}

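/* NVRAM is only readable 32 bits at a time on a 4-byte boundary, so the
 * request is split into an unaligned head, an aligned middle, and an
 * unaligned tail, and reassembled into the caller's buffer.
 */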
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					      CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start = 0, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}

static int tg3_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 supported, advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		phy_ethtool_ksettings_get(phydev, cmd);

		return 0;
	}

	supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		supported |= (SUPPORTED_1000baseT_Half |
			      SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		supported |= (SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_TP);
		cmd->base.port = PORT_TP;
	} else {
		supported |= SUPPORTED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				advertising |= ADVERTISED_Pause;
			} else {
				advertising |= ADVERTISED_Pause |
					ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			advertising |= ADVERTISED_Asym_Pause;
		}
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	if (netif_running(dev) && tp->link_up) {
		cmd->base.speed = tp->link_config.active_speed;
		cmd->base.duplex = tp->link_config.active_duplex;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising,
			tp->link_config.rmt_adv);

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->base.phy_address = tp->phy_addr;
	cmd->base.autoneg = tp->link_config.autoneg;
	return 0;
}

static int tg3_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_set(phydev, cmd);
	}

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    cmd->base.duplex != DUPLEX_FULL &&
	    cmd->base.duplex != DUPLEX_HALF)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->base.duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->base.autoneg;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->base.duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}

static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}

static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);

	return 0;
}

static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}

static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}

static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}

static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;
	bool reset_phy = false;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Reset PHY to avoid PHY lock up */
		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			reset_phy = true;

		err = tg3_restart_hw(tp, reset_phy);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}

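/* Pause parameter handlers: flow control state lives in
 * tp->link_config.flowctrl as FLOW_CTRL_RX/FLOW_CTRL_TX bits, and the
 * PAUSE_AUTONEG flag selects between autonegotiated and forced pause
 * settings.
 */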
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}

static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;
	bool reset_phy = false;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

		if (!phy_validate_pause(phydev, epause))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			}
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		}

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			if (phydev->autoneg) {
				/* phy_set_asym_pause() will
				 * renegotiate the link to inform our
				 * link partner of our flow control
				 * settings, even if the flow control
				 * is forced.  Let tg3_adjust_link()
				 * do the final flow control setup.
				 */
				return 0;
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			/* Reset PHY to avoid PHY lock up */
			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
			    tg3_asic_rev(tp) == ASIC_REV_5720)
				reset_phy = true;

			err = tg3_restart_hw(tp, reset_phy);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}

static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}

static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}

static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
			const u8 hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}

static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}

static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}

static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}

static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}

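/* Locate and read the VPD block: an extended VPD directory entry in
 * NVRAM is preferred when present, otherwise the fixed NVRAM offset is
 * used; devices without the standard EEPROM magic are read through the
 * PCI VPD capability instead.
 */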
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);

	return NULL;
}

#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c

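/* Validate NVRAM contents.  Legacy images carry a CRC over the 16-byte
 * bootstrap header (checksum word at 0x10) and over the 0x88-byte
 * manufacturing block at 0x74 (checksum word at 0xfc); selfboot images
 * use a simple byte checksum or per-byte parity bits instead.
 */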
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;

			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;

			} else {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			}

			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}

#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}

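/* Each entry in the register table below carries a read-only mask and a
 * read/write mask.  The test writes all-zeros and then all-ones through
 * the combined mask, checking that read-only bits never change and that
 * read/write bits take exactly the written value.
 */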
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}

static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}

static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}

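/* Canned frame used by the TSO loopback test: the Ethernet type field
 * (0x0800) followed by IPv4 and TCP headers carrying 12 bytes of TCP
 * options.  The payload pattern is filled in by tg3_run_loopback().
 */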
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};

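/* Send one frame (or one TSO burst) through the currently configured
 * loopback path and verify it arrives on the expected rx ring with the
 * expected length, checksum status, and payload pattern.
 */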
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}

#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)

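/* Run the standard, jumbo, and TSO loopback variants over the MAC,
 * internal PHY, and (optionally) external loopback paths, ORing the
 * per-variant failure bits into the ethtool test results.
 */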
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
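/* ethtool self-test entry point (ethtool -t).  Online tests cover NVRAM
 * and link; an offline pass additionally halts the chip to run the
 * register, memory, loopback, and interrupt tests, then restarts the
 * hardware.
 */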
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);
}
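/* SIOCSHWTSTAMP handler: maps the requested hwtstamp_config rx_filter
 * onto the chip's TG3_RX_PTP_CTL bits and toggles the TX_TSTAMP_EN flag.
 */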
static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
		tg3_flag_set(tp, TX_TSTAMP_EN);
	else
		tg3_flag_clear(tp, TX_TSTAMP_EN);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
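/* SIOCGHWTSTAMP handler: the inverse of tg3_hwtstamp_set(), rebuilding a
 * hwtstamp_config from the cached TG3_RX_PTP_CTL bits.
 */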
static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	stmpconf.flags = 0;
	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

	switch (tp->rxptpctl) {
	case 0:
		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		fallthrough;
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
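/* Interrupt coalescing: the get handler mirrors the cached
 * ethtool_coalesce block; the set handler below validates each field
 * against chip limits before programming it via __tg3_set_coalesce().
 */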
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
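/* Energy Efficient Ethernet (802.3az) configuration.  Only boards that
 * advertise TG3_PHYFLG_EEE_CAP accept these requests; direct manipulation
 * of the EEE advertisement mask is rejected below.
 */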
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}
static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
					  ETHTOOL_COALESCE_MAX_FRAMES |
					  ETHTOOL_COALESCE_USECS_IRQ |
					  ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
					  ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
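/* MTU changes on a running interface require a full stop/halt/restart
 * cycle; some ASIC revisions also need a PHY reset so the read DMA engine
 * leaves its 256-byte-request mode (see the comment below).
 */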
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
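/* NVRAM/EEPROM size detection helpers.  The legacy EEPROM is sized by
 * probing power-of-two offsets until the validation signature wraps
 * around to offset 0.
 */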
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
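/* Illustration of the byteswap in tg3_get_nvram_size() above, with an
 * assumed raw value: if the low 16 bits of the word read back as 0x0001,
 * swab16() yields 0x0100 = 256, i.e. a 256 KB part (256 * 1024 bytes).
 */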
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
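/* Decode the flash page size strapped into NVRAM_CFG1 on 5752 and newer
 * parts; used by the per-ASIC *_nvram_info() helpers below.
 */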
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
						AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
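/* Top-level NVRAM setup: tg3_nvram_init() below dispatches to the
 * per-ASIC *_nvram_info() helpers above, then falls back to the legacy
 * EEPROM sizing path on 5700/5701-class hardware.
 */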
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
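/* Pull the boot-time hardware configuration (PHY id, LED mode, WOL, ASF,
 * APE, and RGMII strapping) out of the NIC SRAM config blocks left behind
 * by the bootcode.
 */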
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
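/* Parse the PCI VPD read-only section for the board part number and, on
 * boards whose VPD manufacturer id is "1028" (Dell's PCI vendor id), a
 * vendor-specific firmware string.  Falls back to hard-coded names keyed
 * off the PCI device id when no usable VPD is found.
 */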
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xff000000) != 0x00000000)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;

			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
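	/* The build number is appended as a single letter ('a' + build - 1),
	 * so only builds 1..26 can be represented; reject anything larger.
	 */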
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;
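	/* Walk the NVRAM directory looking for the ASF initialization
	 * entry; the image it points at carries the management firmware
	 * version that is appended below.
	 */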
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;

		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}

static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}

static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}

static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);
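		/* The register holding the ASIC revision differs by device
		 * generation, so select it from the PCI device ID.
		 */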
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
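	/* Derive the nested chip-family flags.  Each newer family is
	 * folded into the older, broader ones, so a single test such as
	 * tg3_flag(tp, 5705_PLUS) covers every later chip as well.
	 */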
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}

static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time so it never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
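	/* Work out which hardware function this port is.  Multi-function
	 * devices report it through the PCI-X status register or the CPMU
	 * status register rather than through the PCI devfn alone.
	 */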
	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);
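	/* Firmware TSO and the ASF management firmware share the chip's
	 * internal CPU (the presumed reason for this exclusion), so the
	 * TSO capability is dropped once ASF turns out to be enabled.
	 */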
	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
		tp->ape_hb_interval =
			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}

static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

	if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
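	/* A value of 0x484b (ASCII "HK") in the upper half is taken as
	 * the bootcode's signature that the mailbox holds a valid MAC
	 * address.
	 */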
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0]))
		return -EINVAL;
	return 0;
}

#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			fallthrough;
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			fallthrough;
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			fallthrough;
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			fallthrough;
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			fallthrough;
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
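/* tg3_do_test_dma() exercises the chip's read/write DMA engines directly:
 * it hand-builds an internal buffer descriptor in NIC SRAM and pushes it
 * through the FTQ to move a host buffer to or from the device.
 */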
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
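	/* Pattern-fill the buffer, DMA it to the chip and back, and check
	 * for corruption; on a mismatch fall back to a 16-byte write
	 * boundary and retry.
	 */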
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
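
/* Probe one Tigon3 device: enable and map it, discover chip invariants,
 * configure DMA masks and offload features, and register the net_device.
 */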
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
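
	/* Hand out interrupt, rx-return and tx-producer mailbox registers to
	 * each NAPI context; vector 0's values double as the single-vector
	 * configuration.
	 */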
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);
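
	/* Only the 5719, 5720 and 5762 ASICs have the hardware clock used
	 * for PTP support.
	 */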
	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);

		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}
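
/* Tear down everything tg3_init_one() set up, in reverse order. */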
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
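
/* System sleep hooks: quiesce the chip on suspend, re-init it on resume. */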
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
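
/* PCI error recovery (AER) callbacks, wired into the PCI core below. */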
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);