/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include <asm/idprom.h>

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
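/* Illustrative usage (not part of the original file): callers pass the
 * short flag name and the macros paste on the TG3_FLAG_ prefix, e.g.
 *
 *	if (tg3_flag(tp, TAGGED_STATUS))
 *		tg3_flag_clear(tp, TAGGED_STATUS);
 */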
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
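/* Illustrative: because TG3_TX_RING_SIZE is a power of two, the mask form
 * wraps without a divide, e.g. NEXT_TX(510) == 511 and NEXT_TX(511) == 0.
 */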
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
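/* Illustrative consequence: on a platform with efficient unaligned access
 * (NET_IP_ALIGN == 0), TG3_RX_COPY_THRESH(tp) folds to the constant 256,
 * so the copy-vs-recycle decision compiles to a compare against an
 * immediate rather than a load of tp->rx_copy_thresh.
 */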
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);	/* read back to flush the posted write */
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
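/* Illustrative: tw32() is a plain posted write, tw32_f() reads the register
 * back to flush the write, and tw32_wait_f() additionally delays for the
 * requested usec count, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * as used below when reprogramming clocks.
 */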
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off, ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);

		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
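/* Illustrative: each loop iteration above costs one register read plus a
 * 10 usec delay, so a caller such as tg3_ape_scratchpad_read() below, which
 * passes timeout_us = 30000, polls for roughly 30 msec at most.
 */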
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
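/* Illustrative arithmetic: the busy-wait above is bounded by
 * PHY_BUSY_LOOPS iterations of udelay(10), i.e. 5000 * 10 usec = 50 msec
 * worst case per MDIO transaction.
 */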
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
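/* Illustrative arithmetic: with TG3_FW_EVENT_TIMEOUT_USEC = 2500, the loop
 * above runs at most (2500 >> 3) + 1 = 313 iterations of udelay(8), i.e.
 * it re-polls the event bit for roughly 2.5 msec.
 */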
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
*tp
)
1811 if (tg3_flag(tp
, NO_FWARE_REPORTED
))
1814 if (tg3_flag(tp
, IS_SSB_CORE
)) {
1815 /* We don't use firmware. */
1819 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
1820 /* Wait up to 20ms for init done. */
1821 for (i
= 0; i
< 200; i
++) {
1822 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
1824 if (pci_channel_offline(tp
->pdev
))
1832 /* Wait for firmware initialization to complete. */
1833 for (i
= 0; i
< 100000; i
++) {
1834 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
1835 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1837 if (pci_channel_offline(tp
->pdev
)) {
1838 if (!tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1839 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1840 netdev_info(tp
->dev
, "No firmware running\n");
1849 /* Chip might not be fitted with firmware. Some Sun onboard
1850 * parts are configured like that. So don't signal the timeout
1851 * of the above loop as an error, but do report the lack of
1852 * running firmware once.
1854 if (i
>= 100000 && !tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1855 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1857 netdev_info(tp
->dev
, "No firmware running\n");
1860 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
) {
1861 /* The 57765 A0 needs a little more
1862 * time to do some important work.
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
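/* Illustrative: if both link partners advertise symmetric 1000BASE-X pause
 * (ADVERTISE_1000XPAUSE set on each side), tg3_resolve_flowctrl_1000X()
 * returns FLOW_CTRL_TX | FLOW_CTRL_RX, enabling pause in both directions.
 */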
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	phy_attached_info(phydev);

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}

static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return err;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
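
/* Note: each PCI function gets its own 4-bit field in this message word
 * (hence the shifts by 0, 4, 8 and 12 above); tg3_set_function_status()
 * below masks and updates only the nibble selected by tp->pci_fn.
 */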

static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}

static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static bool tg3_phy_led_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 5000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}

static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);
		buf += size;

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}

static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments. Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length. Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}

/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}

static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}

/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}

/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}

/* tp->lock is held. */
static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
{
	u32 addr_high, addr_low;

	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
		    (mac_addr[4] <<  8) | mac_addr[5]);

	if (index < 4) {
		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
	} else {
		index -= 4;
		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
	}
}

/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high;
	int i;

	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 4; i < 16; i++)
			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}

static int tg3_setup_phy(struct tg3 *, bool);

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, false);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

	return 0;
}

static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}

static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}

static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}

static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
	struct ethtool_eee eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return true;

	tg3_eee_pull_config(tp, &eee);

	if (tp->eee.eee_enabled) {
		if (tp->eee.advertised != eee.advertised ||
		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
			return false;
	} else {
		/* EEE is disabled but we're advertising */
		if (eee.advertised)
			return false;
	}

	return true;
}

static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}

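/* Illustrative note (not from the original source): with, say,
 * advertising = 10/100 half+full duplex and flow control RX|TX on an
 * active full-duplex link, tgtadv above becomes ADVERTISE_ALL plus the
 * PAUSE bits, and the PHY's MII_ADVERTISE register must match it
 * bit-for-bit under advmsk, otherwise the previously requested autoneg
 * configuration is considered stale and the link is not reported up.
 */
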
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}

static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			netif_carrier_on(tp->dev);
		} else {
			netif_carrier_off(tp->dev);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}

static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}

static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}

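/* Illustrative note (not from the original source): only the low 16
 * bits of tp->eee.tx_lpi_timer are programmed into TG3_CPMU_EEE_DBTMR1
 * above (hence the "& 0xffff"); the upper half of that register is
 * occupied by the PCIe exit debounce constant
 * TG3_CPMU_DBTMR1_PCIEXIT_2047US.
 */
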
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset. If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}

struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000

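/* Sketch of a typical successful walk through the states above, as
 * implied by the handlers in tg3_fiber_aneg_smachine() below (this note
 * is not part of the original source): AN_ENABLE -> RESTART_INIT ->
 * RESTART -> ABILITY_DETECT_INIT -> ABILITY_DETECT -> ACK_DETECT_INIT
 * -> ACK_DETECT -> COMPLETE_ACK_INIT -> COMPLETE_ACK ->
 * IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK.  Each *_INIT state
 * programs MAC_TX_AUTO_NEG/MAC_MODE and returns ANEG_TIMER_ENAB so the
 * caller keeps cranking the machine.
 */
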
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}

static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}

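/* Timing note (not from the original source): the polling loop above
 * allows just under 195000 iterations of the state machine with a
 * microsecond delay per tick, i.e. a budget of roughly 195 ms for
 * autoneg to reach ANEG_DONE before fiber_autoneg() gives up and
 * reports failure.
 */
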
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}

static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}

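/* Mask check (an illustrative note, not from the original source):
 * 0x00f06fff keeps bits 0-11 (0x0fff) and bits 13-14 (0x6000) for
 * signal pre-emphasis, plus bits 20-23 (0x00f00000) for the voltage
 * regulator, matching the in-line comments above; everything else in
 * MAC_SERDES_CFG is rewritten via the 0xc010000/0x4010000/0xc011000
 * per-port values.
 */
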
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}

static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u16 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	    !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	    (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}

static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}

static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}

/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}

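/* Sequencing note (not from the original source): the write path above
 * first freezes the reference clock (CTL_STOP), loads both 32-bit
 * halves while the counter is stopped, and only the final flushed
 * write (CTL_RESUME) lets it run again, so no reader can observe a
 * torn 64-bit value mid-update.
 */
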
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
	}

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}

static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}

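/* Worked example of the conversion above (not from the original
 * source): for ppb = 1000 (i.e. 1 ppm), correction =
 * 1000 * 2^24 / 10^9 = 16777216000 / 10^9, truncated to 16.  The
 * 24-bit accumulator then overflows 16 counts early per 2^24 clocks,
 * which is 16 / 2^24 ~= 0.95e-6, nudging the counter by roughly one
 * part per million as requested.
 */
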
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	*ts = ns_to_timespec64(ns);

	return 0;
}

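/* Design note (not from the original source): the PHC callbacks divide
 * the work so that tg3_ptp_adjtime() only bumps the software offset
 * tp->ptp_adjust, while gettime folds that offset into the raw EAV
 * reference-clock reading under tp->lock; settime (below) rewrites the
 * hardware counter itself and zeroes the offset.
 */
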
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec64_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime64	= tg3_ptp_gettime,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};

static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}

/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}

static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}

static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}

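/* Format note (not from the original source): the register dump above
 * prints four consecutive 32-bit registers per line; the leading
 * "0x%08x:" is the byte offset of the first register in the quad
 * (i * 4, since regs[] is indexed in u32 units), and all-zero quads
 * are skipped to keep the log readable.
 */
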
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}

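/* Worked example of the ring arithmetic above (not from the original
 * source): with TG3_TX_RING_SIZE = 512, tx_prod = 10 and tx_cons = 500,
 * (10 - 500) & 511 = 22 descriptors are in flight even though tx_prod
 * has wrapped past the end of the ring, so with tx_pending = 512 the
 * function reports 490 free slots.
 */
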
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}

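/* Sizing note (not from the original source): for a standard 1500-byte
 * MTU, data_size plus TG3_RX_OFFSET and the aligned skb_shared_info
 * footer comfortably fits within one page, so the cheap
 * netdev_alloc_frag() page-fragment path is taken and *frag_size is
 * nonzero; only jumbo-sized buffers fall back to kmalloc(), which is
 * signalled back to tg3_frag_free() via *frag_size = 0.
 */
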
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}

6766 /* The RX ring scheme is composed of multiple rings which post fresh
6767 * buffers to the chip, and one special ring the chip uses to report
6768 * status back to the host.
6770 * The special ring reports the status of received packets to the
6771 * host. The chip does not write into the original descriptor the
6772 * RX buffer was obtained from. The chip simply takes the original
6773 * descriptor as provided by the host, updates the status and length
6774 * field, then writes this into the next status ring entry.
6776 * Each ring the host uses to post buffers to the chip is described
6777 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6778 * it is first placed into the on-chip ram. When the packet's length
6779 * is known, it walks down the TG3_BDINFO entries to select the ring.
6780 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6781 * which is within the range of the new packet's length is chosen.
6783 * The "separate ring for rx status" scheme may sound queer, but it makes
6784 * sense from a cache coherency perspective. If only the host writes
6785 * to the buffer post rings, and only the chip writes to the rx status
6786 * rings, then cache lines never move beyond shared-modified state.
6787 * If both the host and chip were to write into the same ring, cache line
6788 * eviction could occur since both entities want it in an exclusive state.
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
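/* Note on the two receive paths above: frames longer than
 * TG3_RX_COPY_THRESH(tp) are handed up zero-copy via build_skb() once a
 * replacement buffer has been posted, while short frames are copied into
 * a freshly allocated skb so the original DMA buffer can be recycled
 * straight back onto the producer ring.
 */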
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
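/* For example, with a 512-entry standard ring (rx_std_ring_mask = 511),
 * cons_idx = 500 and prod_idx = 10, the first pass copies
 * 512 - 500 = 12 entries up to the end of the ring; the next loop
 * iteration then handles the 10 wrapped entries.  cpycnt is further
 * clamped so the destination ring is never overrun.
 */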
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
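/* tg3_reset_task_cancel() clears the PENDING flags only after
 * cancel_work_sync() has returned, so an in-flight reset either runs to
 * completion or is fully cancelled before a new reset can be scheduled.
 */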
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}
static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}
static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}
static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
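/* The NAPI helpers above are used in matched pairs over the device
 * lifetime: tg3_napi_init()/tg3_napi_fini() register and unregister the
 * per-vector contexts, while tg3_napi_enable()/tg3_napi_disable() gate
 * polling.  Note that disable walks the vectors in reverse order, so
 * vector 0, which also fields link events, is quiesced last.
 */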
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
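/* A typical caller pairs the two helpers above like this (sketch only):
 *
 *	tg3_full_lock(tp, 1);		(also quiesces the IRQ handlers)
 *	... reconfigure the hardware ...
 *	tg3_full_unlock(tp);
 *
 * Passing irq_sync == 0 takes just tp->lock; a non-zero value additionally
 * waits in tg3_irq_quiesce() for every interrupt vector to drain.
 */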
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp))
		netdev_err(dev, "transmit timed out, resetting\n");

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}
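/* For example, with base = 0xfffffff0 and len = 0x20, base + len + 8
 * computes to 0x100000018, which truncates to 0x18 in 32 bits; 0x18 is
 * less than base, so the test reports that the buffer straddles a 4GB
 * boundary.  A buffer that fits entirely below the boundary never wraps,
 * so the comparison stays false.
 */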
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
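/* Here DMA_BIT_MASK(40) is 0xffffffffff.  For example, a mapping of
 * 0xfffffffff8 with len = 16 sums to 0x10000000008, which exceeds the
 * mask, so the test flags an overflow past the 40-bit addressing limit.
 */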
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
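/* For example, a 64-bit mapping of 0x0000000123456789 is split as
 * addr_hi = 0x1 and addr_lo = 0x23456789.  In len_flags the flags are
 * masked to the low 16 bits while len occupies the bits above
 * TXD_LEN_SHIFT, so one 32-bit word carries both fields.
 */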
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}
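/* The /3 budget above mirrors the frag_cnt_est = gso_segs * 3 worst-case
 * estimate used in tg3_tso_bug() below: each segment produced by GSO is
 * assumed to consume up to three descriptors once the DMA workarounds
 * have split its buffers.
 */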
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u32 tcp_opt_len, hdr_len;

		if (skb_cow_head(skb, 0))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		/* HW/FW can not correctly segment packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (tg3_tso_bug_gso_check(tnapi, skb))
				return tg3_tso_bug(tp, tnapi, txq, skb);
			goto drop;
		}

		if (!skb_is_gso_v6(skb)) {
			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			    tg3_flag(tp, TSO_BUG)) {
				if (tg3_tso_bug_gso_check(tnapi, skb))
					return tg3_tso_bug(tp, tnapi, txq, skb);
				goto drop;
			}
			ip_csum = iph->check;
			ip_tot_len = iph->tot_len;
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcph = tcp_hdr(skb);
		tcp_csum = tcph->check;

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcph->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else {
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		}

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW can not correctly checksum packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (skb_checksum_help(skb))
				goto drop;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (skb_vlan_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = skb_vlan_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card. */
		tw32_tx_mbox(tnapi->prodmbox, entry);
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
						    TG3_RX_RCB_RING_BYTES(tp),
						    &tnapi->rx_rcb_mapping,
						    GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
	tg3_full_lock(tp, 0);
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
	tg3_full_unlock(tp);
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
					   sizeof(struct tg3_hw_stats),
					   &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
						       TG3_HW_STATUS_SIZE,
						       &tnapi->status_mapping,
						       GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |=
					TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
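
/* Note on the helper above: the ring's host DMA address is a 64-bit
 * quantity, but the BDINFO block in NIC SRAM stores it as two 32-bit
 * words, which is why the mapping is split with ">> 32" and
 * "& 0xffffffff".  An illustrative sketch of the split, with an assumed
 * example address (not driver code):
 */
#if 0
	dma_addr_t mapping = 0x123456789abcdef0ULL;	/* hypothetical */
	u32 hi = (u64) mapping >> 32;		/* 0x12345678 */
	u32 lo = (u64) mapping & 0xffffffff;	/* 0x9abcdef0 */
#endif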
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
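
/* The per-vector coalescing registers above are laid out as one
 * 0x18-byte block per MSI-X vector, so BASE_VEC1 + i * 0x18 addresses
 * the copy belonging to vector i + 1.  A hypothetical helper making the
 * stride explicit (example only, not part of the driver):
 */
#if 0
static u32 tg3_hostcc_vec_reg(u32 base_vec1, int i)
{
	/* base_vec1 is the vector-1 register; each later vector's
	 * copy sits 0x18 bytes higher.
	 */
	return base_vec1 + i * 0x18;
}
#endif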
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
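
/* Worked example for the threshold math above (a sketch with assumed
 * numbers): with rx_pending = 200 the host replenish threshold is
 * max(200 / 8, 1) = 25 buffers, and RCVBDI_STD_THRESH gets the smaller
 * of that and the NIC-side threshold min(bdcache_maxcnt / 2,
 * rx_std_max_post), so the chip requests refills before either the
 * host ring or the on-chip BD cache runs dry.
 */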
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
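
/* The routine above is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320) used for Ethernet.  A minimal usage sketch, with an
 * assumed multicast MAC address as input (example only):
 */
#if 0
	u8 addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u32 crc = calc_crc(addr, ETH_ALEN);	/* feeds the hash filter */
#endif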
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
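
/* How the multicast hash above maps a CRC to a filter bit, sketched
 * with an assumed value: "~crc & 0x7f" yields one of 128 hash bits;
 * bits 6:5 of that value ("(bit & 0x60) >> 5") pick one of the four
 * 32-bit MAC_HASH_REG_x registers, and bits 4:0 ("bit &= 0x1f") pick
 * the bit within it.  For example, ~crc & 0x7f == 0x6a selects
 * register 3 (0x6a >> 5 == 3), bit 0x0a.
 */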
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
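
/* Packing in the loop above, illustrated with assumed entries: each of
 * the 128 indirection-table slots is a 4-bit queue index, so eight
 * entries are shifted into one 32-bit register and the whole table
 * occupies 16 consecutive registers starting at MAC_RSS_INDIR_TBL_0.
 * E.g. entries {1, 0, 1, 0, 1, 0, 1, 0} pack to 0x10101010, with the
 * first entry in the most significant nibble.
 */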
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;
	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}
	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);
	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);
	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);
	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));
	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);
	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}
	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}
	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);
	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10 ; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
	}
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}
	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}
	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}
	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature * 1000);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
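
/* Carry detection in the macro above, sketched: the hardware counter is
 * 32 bits and wraps, so the running 64-bit total is kept as a
 * {low, high} pair.  If the unsigned add wraps, the new ->low is
 * smaller than the value just added, and ->high picks up the carry.
 * Example with assumed values (not driver code):
 */
#if 0
	/* low = 0xffffff00, __val = 0x200 */
	low += __val;		/* low = 0x100, wrapped past 2^32 */
	if (low < __val)	/* 0x100 < 0x200: carry detected */
		high += 1;
#endif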
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
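
/* Timer arithmetic above, with worked numbers (a sketch): with tagged
 * status on most chips timer_offset is HZ, so the timer fires once per
 * second and timer_multiplier is 1; otherwise timer_offset is HZ / 10,
 * the timer fires ten times per second, and timer_multiplier is 10, so
 * the "once per second" work in tg3_timer still runs at 1 Hz.  The ASF
 * heartbeat counter scales the same way via TG3_FW_UPDATE_FREQ_SEC.
 */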
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
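
/* Worked example (illustrative, not driver code): with rxq_cnt = 4 and
 * txq_cnt = 1 on a chip whose irq_max is 5, the vector budget above works
 * out as
 *
 *	irq_cnt = max(4, 1);			// 4 rings needed
 *	irq_cnt = min_t(unsigned, 4 + 1, 5);	// +1 for the link vector
 *
 * so one MSI-X vector is reserved for link/async events and four remain
 * for the rx rings.
 */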
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to close device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	if (pci_device_is_present(tp->pdev)) {
		tg3_power_down_prepare(tp);

		tg3_carrier_off(tp);
	}
	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}

static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
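
/* Sketch (illustrative, not driver code): the hardware keeps each
 * statistic as two 32-bit halves, so a counter read as high = 0x1 and
 * low = 0x2 recombines in get_stat64() above as
 *
 *	((u64)0x1 << 32) | 0x2 == 0x100000002ULL
 */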
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
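
/* For reference, one ESTAT_ADD() line above expands to (illustrative):
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. the snapshot saved before the last reset (estats_prev) is added to
 * the live hardware counter, so totals survive chip resets.
 */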
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					      CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}
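
/* Worked example of the alignment fixups above (illustrative): a request
 * with offset = 1 and len = 2 gives b_offset = 1 and b_count = 3, which
 * is then clamped to len = 2.  One aligned word is read from offset 0 and
 * bytes 1..2 of it are copied out, leaving nothing for the aligned middle
 * loop or the tail read.
 */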
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start = 0, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 supported, advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_get(phydev, cmd);
	}

	supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		supported |= (SUPPORTED_1000baseT_Half |
			      SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		supported |= (SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_TP);
		cmd->base.port = PORT_TP;
	} else {
		supported |= SUPPORTED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				advertising |= ADVERTISED_Pause;
			} else {
				advertising |= ADVERTISED_Pause |
					ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			advertising |= ADVERTISED_Asym_Pause;
		}
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	if (netif_running(dev) && tp->link_up) {
		cmd->base.speed = tp->link_config.active_speed;
		cmd->base.duplex = tp->link_config.active_duplex;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising,
			tp->link_config.rmt_adv);

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->base.phy_address = tp->phy_addr;
	cmd->base.autoneg = tp->link_config.autoneg;
	return 0;
}
static int tg3_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_set(phydev, cmd);
	}

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    cmd->base.duplex != DUPLEX_FULL &&
	    cmd->base.duplex != DUPLEX_HALF)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->base.duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->base.autoneg;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->base.duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}

static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);

	return 0;
}

static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}

static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, false);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}

static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}
static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}

static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
			const u8 hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
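
/* Sketch (illustrative, not driver code): the indirection table maps the
 * low bits of a packet's RSS hash to an rx queue, so spreading four
 * queues evenly would look like
 *
 *	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
 *		tp->rss_ind_tbl[i] = i % 4;
 */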
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}

static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
				   LED_CTRL_1000MBPS_ON |
				   LED_CTRL_100MBPS_ON |
				   LED_CTRL_10MBPS_ON |
				   LED_CTRL_TRAFFIC_OVERRIDE |
				   LED_CTRL_TRAFFIC_BLINK |
				   LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
				   LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}

static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				j += 1;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				j += 1;
			} else {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			}

			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
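
/* Note on the parity check above (illustrative): hweight8() counts set
 * bits, and the selfboot HW format stores odd parity over data byte plus
 * parity bit.  For data[i] = 0x03 (two bits set, hw8 & 0x1 == 0) the
 * stored parity bit must be set; for data[i] = 0x01 (odd weight) it must
 * be clear.  Either mismatch direction fails the NVRAM test.
 */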
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int max, i;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705, 0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705, 0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705, 0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705, 0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000, 0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000, 0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000, 0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000, 0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000, 0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705, 0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705, 0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000, 0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000, 0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000, 0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000, 0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000, 0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000, 0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000, 0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000, 0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705, 0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705, 0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705, 0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705, 0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705, 0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000, 0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
*tp
)
13283 static struct mem_entry
{
13286 } mem_tbl_570x
[] = {
13287 { 0x00000000, 0x00b50},
13288 { 0x00002000, 0x1c000},
13289 { 0xffffffff, 0x00000}
13290 }, mem_tbl_5705
[] = {
13291 { 0x00000100, 0x0000c},
13292 { 0x00000200, 0x00008},
13293 { 0x00004000, 0x00800},
13294 { 0x00006000, 0x01000},
13295 { 0x00008000, 0x02000},
13296 { 0x00010000, 0x0e000},
13297 { 0xffffffff, 0x00000}
13298 }, mem_tbl_5755
[] = {
13299 { 0x00000200, 0x00008},
13300 { 0x00004000, 0x00800},
13301 { 0x00006000, 0x00800},
13302 { 0x00008000, 0x02000},
13303 { 0x00010000, 0x0c000},
13304 { 0xffffffff, 0x00000}
13305 }, mem_tbl_5906
[] = {
13306 { 0x00000200, 0x00008},
13307 { 0x00004000, 0x00400},
13308 { 0x00006000, 0x00400},
13309 { 0x00008000, 0x01000},
13310 { 0x00010000, 0x01000},
13311 { 0xffffffff, 0x00000}
13312 }, mem_tbl_5717
[] = {
13313 { 0x00000200, 0x00008},
13314 { 0x00010000, 0x0a000},
13315 { 0x00020000, 0x13c00},
13316 { 0xffffffff, 0x00000}
13317 }, mem_tbl_57765
[] = {
13318 { 0x00000200, 0x00008},
13319 { 0x00004000, 0x00800},
13320 { 0x00006000, 0x09800},
13321 { 0x00010000, 0x0a000},
13322 { 0xffffffff, 0x00000}
13324 struct mem_entry
*mem_tbl
;
13328 if (tg3_flag(tp
, 5717_PLUS
))
13329 mem_tbl
= mem_tbl_5717
;
13330 else if (tg3_flag(tp
, 57765_CLASS
) ||
13331 tg3_asic_rev(tp
) == ASIC_REV_5762
)
13332 mem_tbl
= mem_tbl_57765
;
13333 else if (tg3_flag(tp
, 5755_PLUS
))
13334 mem_tbl
= mem_tbl_5755
;
13335 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
13336 mem_tbl
= mem_tbl_5906
;
13337 else if (tg3_flag(tp
, 5705_PLUS
))
13338 mem_tbl
= mem_tbl_5705
;
13340 mem_tbl
= mem_tbl_570x
;
13342 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
13343 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
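
/* Layout of tg3_tso_header above (illustrative annotation): bytes 0-1 are
 * the 0x0800 IPv4 ethertype, the next 20 bytes an IPv4 header (src
 * 10.0.0.1, dst 10.0.0.2), and the remainder a 20-byte TCP header plus a
 * 12-byte timestamp option (data offset 0x80 >> 4 = 8 words), matching
 * TG3_TSO_IP_HDR_LEN / TG3_TSO_TCP_HDR_LEN / TG3_TSO_TCP_OPT_LEN.
 */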
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
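
/* The three failure bits above are OR-ed into one u64 test result, so a
 * combined value of 5 (illustrative) would decode as standard and TSO
 * loopback failures with the jumbo case passing:
 *
 *	(TG3_STD_LOOPBACK_FAILED | TG3_TSO_LOOPBACK_FAILED) == 0x5
 */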
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);
}
static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
		tg3_flag_set(tp, TX_TSTAMP_EN);
	else
		tg3_flag_clear(tp, TX_TSTAMP_EN);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	stmpconf.flags = 0;
	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

	switch (tp->rxptpctl) {
	case 0:
		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fall through */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
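/* ethtool coalescing accessors.  tg3 keeps the active parameters cached
 * in tp->coal; the set path validates against chip-specific limits and
 * only pushes the new values to the hardware while the device is up.
 */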
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
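/* EEE (Energy Efficient Ethernet) accessors.  The advertisement mask is
 * fixed at probe time, so the set path rejects attempts to change it and
 * bounds-checks the Tx LPI timer against the CPMU link-idle maximum.
 */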
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}
static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
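/* 5780-class chips cannot run TSO and jumbo frames at the same time, so
 * crossing ETH_DATA_LEN toggles TSO_CAPABLE and re-evaluates the netdev
 * feature set (see tg3_fix_features); everything else just flips the
 * jumbo producer ring on and off.
 */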
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
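/* NVRAM/EEPROM probing.  The helpers below identify the attached part
 * from NVRAM_CFG1 (or the bootstrap pin straps on newer chips) and fill
 * in tp->nvram_jedecnum, the page size and the total size that the rest
 * of the NVRAM access code relies on.
 */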
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		break;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
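/* Some early boards carry no usable PHY ID in their EEPROM; the table
 * below maps their PCI subsystem IDs to the correct PHY instead (a zero
 * phy_id entry marks a SerDes board).
 */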
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}

	return NULL;
}
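/* Pull the board configuration (PHY type, LED mode, WOL/ASF/APE enables)
 * out of the NIC SRAM signature block left behind by the bootcode.
 */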
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
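/* APE OTP words are read indirectly: write the byte address, kick a read
 * command and poll TG3_APE_OTP_STATUS for completion, all under the
 * NVRAM arbitration lock.
 */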
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
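/* Extract the board part number (and, on Dell boards, identified by the
 * "1028" MFR_ID keyword, a bootcode prefix for the firmware version
 * string) from the PCI VPD read-only section, falling back to names
 * keyed off the PCI device ID when no VPD is available.
 */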
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
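/* The 0x0c000000 test above matches the bootcode image header signature
 * in NVRAM; tg3_read_bc_ver() below uses the same signature to decide
 * between the newer string-based version format and the older
 * major/minor word format.
 */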
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;

			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;

		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
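/* DASH, SMASH and NCSI are alternative management firmware images that
 * run on the APE (Application Processing Engine).  tg3_read_dash_ver()
 * below only labels the version string; which label applies is chosen
 * from the NCSI flag probed above and from the PCI device ID.
 */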
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
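/* tp->fw_ver ends up as a concatenation of whichever sources were found:
 * a VPD vendor string, then a "bc vX.YY" bootcode version from NVRAM,
 * then an optional " NCSI vA.B.C.D"-style APE suffix (illustrative
 * composition; the actual contents depend on the board and its NVRAM).
 */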
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
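/* Hosts on this list may reorder posted writes to the mailbox registers.
 * The MBOX_WRITE_REORDER workaround (set up in tg3_get_invariants())
 * routes mailbox writes through a flushing variant that reads the
 * register back immediately to force ordering, roughly:
 *
 *	writel(val, mbox);
 *	readl(mbox);	(flush the posted write)
 *
 * Sketch only; the real helpers are tg3_write32_tx_mbox() and
 * tg3_write_flush_reg32().
 */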
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
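/* The *_PLUS flags set above form a strict hierarchy: 57765_PLUS implies
 * 5755_PLUS, which implies 5750_PLUS, which implies 5705_PLUS.  Later
 * code can therefore test the weakest flag that covers the devices it
 * cares about.
 */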
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time so it is never
	 * used. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == ETH_ALEN) {
		memcpy(dev->dev_addr, addr, ETH_ALEN);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
	return 0;
}
#endif
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
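/* MAC address selection order, as implemented above: OpenFirmware
 * property (sparc), SSB core registers, the SRAM mailbox written by
 * bootcode (guarded by the 0x484b "HK" signature), NVRAM at the
 * per-function mac_offset, and finally the live MAC_ADDR_0 registers,
 * with the sparc idprom as a last-resort fallback.
 */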
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
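/* Worked example: PCI_CACHE_LINE_SIZE is in units of dwords, so a
 * 64-byte cache line reads back as byte == 0x10 and cacheline_size
 * becomes 64.  On a conventional-PCI system whose goal is
 * BOUNDARY_SINGLE_CACHELINE, the switch above lands in "case 64" and
 * sets DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64, so bursts
 * stop at 64-byte boundaries.
 */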
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
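/* The DMA test below fills a coherent buffer with the pattern p[i] = i,
 * DMAs it to NIC SRAM and back via tg3_do_test_dma(), and checks for
 * corruption.  A mismatch first retries with the write boundary clamped
 * to 16 bytes; only a failure at that setting is reported as fatal.
 */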
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
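/* General description (not from the original sources): these watermarks
 * feed the on-chip buffer manager; when free mbuf space drops below the
 * MACRX low-water mark the MAC asserts flow control, releasing it again
 * around the high-water mark.  The per-generation values are Broadcom
 * tunings; see the BUFMGR register definitions for the exact semantics.
 */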
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
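
	/*
	 * Sketch, not part of the original driver: on a little-endian host
	 * the GRC_MODE word/byte swap bits set above make the on-board
	 * big-endian engines and the host agree on descriptor layout; the
	 * extra GRC_MODE_BSWAP_NONFRM_DATA bit is only needed when the host
	 * itself is big-endian, which is why it sits under __BIG_ENDIAN.
	 */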

	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
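
	/*
	 * Sketch, not part of the original driver: the three cases above
	 * give
	 *
	 *	IS_5788        -> streaming and coherent masks both 32-bit
	 *	40BIT_DMA_BUG  -> coherent mask 40-bit; streaming mask
	 *	                  40-bit, widened to 64-bit under
	 *	                  CONFIG_HIGHMEM since tg3_start_xmit()
	 *	                  re-checks addresses per packet
	 *	otherwise      -> both masks 64-bit
	 *
	 * For n < 64, DMA_BIT_MASK(n) is ((1ULL << n) - 1), so
	 * DMA_BIT_MASK(40) == 0xffffffffffULL.
	 */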

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
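
	/*
	 * Sketch, not part of the original driver: with MSI-X, vector 0
	 * keeps the mailbox values computed on the first pass (it handles
	 * link interrupts only), while vectors 1..n advance rcvmbx/sndmbx
	 * so each rx/tx ring gets its own producer/consumer mailbox.
	 * Without MSI-X the loop breaks after the first iteration and the
	 * single vector uses the vector-0 values.
	 */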

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);

		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}
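
/*
 * Note, not part of the original driver: the error labels above unwind in
 * exactly the reverse order of the corresponding setup steps, a common
 * kernel idiom; each "goto err_out_*" skips the cleanup for resources
 * that were never acquired.
 */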

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
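
/*
 * Note, not part of the original driver: tg3_remove_one() releases
 * resources in roughly the reverse order that tg3_init_one() acquired
 * them: netdev unregistration first, then the MMIO unmaps, then the
 * netdev itself, and finally the PCI regions and device enable.
 */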

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
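
/*
 * Note, not part of the original driver: SIMPLE_DEV_PM_OPS() defines a
 * const struct dev_pm_ops wiring tg3_suspend/tg3_resume to the system
 * sleep callbacks (suspend/resume, freeze/thaw, poweroff/restore).  When
 * CONFIG_PM_SLEEP is unset those slots stay empty, which is why the macro
 * is usable outside the #ifdef above.  Roughly equivalent, as a sketch:
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(tg3_suspend, tg3_resume)
 *	};
 */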

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it is OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
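
/*
 * Note, not part of the original driver: the AER core invokes these
 * callbacks in sequence: error_detected() quiesces the device and
 * returns PCI_ERS_RESULT_NEED_RESET, slot_reset() re-enables the device
 * and restores its PCI state after the link reset, and resume() restarts
 * the hardware and traffic once recovery is declared successful.
 */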

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
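
/*
 * Note, not part of the original driver: module_pci_driver() expands to
 * the boilerplate module_init()/module_exit() pair, registering the
 * driver on load and unregistering it on unload.  A sketch of the
 * expansion:
 *
 *	static int __init tg3_driver_init(void)
 *	{
 *		return pci_register_driver(&tg3_driver);
 *	}
 *	module_init(tg3_driver_init);
 *
 *	static void __exit tg3_driver_exit(void)
 *	{
 *		pci_unregister_driver(&tg3_driver);
 *	}
 *	module_exit(tg3_driver_exit);
 */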