/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
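
/* Illustrative expansion (comment added for clarity, not in the original
 * sources): the wrappers above token-paste the short flag name onto the
 * TG3_FLAG_ prefix, so
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MDIOBUS_INITED);
 *
 * becomes test_bit(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags) followed by
 * set_bit(TG3_FLAG_MDIOBUS_INITED, (tp)->tg3_flags).
 */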
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
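
/* Worked example of the mask trick described above (added comment):
 * TG3_TX_RING_SIZE is a power of two, so NEXT_TX() wraps with a cheap
 * bitwise AND instead of a modulo, e.g. NEXT_TX(510) == 511 and
 * NEXT_TX(511) == 0.
 */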
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
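
/* Sketch of how the copy threshold is consumed (added comment; the actual
 * receive handler appears much later in this file): frames no longer than
 * TG3_RX_COPY_THRESH(tp) are copied into a fresh skb so the original DMA
 * buffer can be recycled immediately, which also hides the 5701 PCIX
 * alignment bug on architectures that need the double copy.
 */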
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
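
/* Added note: these strings are the per-test labels reported back through
 * the ethtool self-test interface (typically triggered with something like
 * "ethtool -t <iface>"); the entries marked "offline" require taking the
 * interface down while the tests run.
 */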
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
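
/* Added note on the accessor macros above: they deliberately reference a
 * local variable named 'tp', so they can only be used inside functions
 * that have a 'struct tg3 *tp' in scope, e.g. (hypothetical helper):
 *
 *	static void example(struct tg3 *tp)
 *	{
 *		u32 mode = tr32(MAC_MODE);	// tp->read32(tp, MAC_MODE)
 *		tw32_f(MAC_MODE, mode);		// write, then flush via read
 *	}
 */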
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
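
/* Illustrative pairing (added comment): callers bracket accesses to a
 * shared resource with the two routines above, e.g.
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... touch APE shared memory ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * tg3_ape_event_lock() below builds exactly this pattern into a retry loop.
 */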
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
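
/* Added note on the mailbox writes above: the status block tag lands in
 * bits 31:24 of the interrupt mailbox, so echoing last_tag tells a
 * tagged-status chip which piece of work the driver has completed; on
 * 1SHOT_MSI parts the write is simply issued twice.
 */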
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
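
/* Worked example of the MI_COM frame built in __tg3_readphy() above
 * (illustrative values, added comment): with phy_addr == 1 and
 * reg == MII_BMSR, the PHY address and register number are shifted into
 * their MI_COM_*_SHIFT fields and ORed with MI_COM_CMD_READ | MI_COM_START;
 * the loop then polls MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations
 * before the 16-bit result is extracted with MI_COM_DATA_MASK.
 */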
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
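
/* Arithmetic check for the wait loop above (added comment): with the full
 * 2500 usec budget, delay_cnt becomes (2500 >> 3) + 1 == 313 iterations
 * of udelay(8), i.e. roughly 2.5 ms worst case before we stop waiting
 * for the firmware's ACK.
 */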
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
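
/* Layout produced above (added summary): each of the four u32 slots packs
 * a pair of 16-bit MII registers, high:low -- data[0] = BMCR:BMSR,
 * data[1] = ADVERTISE:LPA, data[2] = CTRL1000:STAT1000 (zero for MII
 * serdes PHYs) and data[3] holds MII_PHYADDR in its upper half.
 */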
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
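
/* Worked example for the resolution helper above (added comment): if both
 * partners advertise ADVERTISE_1000XPAUSE, pause is symmetric and cap is
 * FLOW_CTRL_TX | FLOW_CTRL_RX.  If only the asymmetric bits match, the
 * local side resolves to FLOW_CTRL_RX when it also advertises the
 * symmetric bit, or to FLOW_CTRL_TX when the remote side does.
 */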
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	phy_attached_info(phydev);

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
2203 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
2207 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2210 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2211 phytest
| MII_TG3_FET_SHADOW_EN
);
2212 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
2214 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2216 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2217 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
2219 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return 0;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
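
/* Illustrative sketch, not driver code: each PCI function owns a 4-bit
 * nibble in the GPIO message word, offset by TG3_APE_GPIO_MSG_SHIFT.
 * tg3_set_function_status() below performs exactly this read-modify-write
 * against the APE or CPMU register; the hypothetical helper here only
 * demonstrates the bit layout.
 */
#if 0
static u32 example_gpio_msg_update(u32 status, u32 pci_fn, u32 newstat)
{
	u32 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn;

	status &= ~(TG3_GPIO_MSG_MASK << shift);	/* clear our nibble */
	status |= (newstat << shift);			/* install new state */
	return status;
}
#endif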
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static bool tg3_phy_led_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
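
/* Usage sketch (illustrative only, not driver code): the NVRAM helpers
 * above are meant to bracket every access as lock -> enable -> operate ->
 * disable -> unlock, all under tp->lock.  tg3_nvram_read() further below
 * follows exactly this pattern.
 */
#if 0
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;
	tg3_enable_nvram_access(tp);
	/* ... NVRAM_ADDR / NVRAM_CMD register accesses go here ... */
	tg3_disable_nvram_access(tp);
	tg3_nvram_unlock(tp);
#endif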
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 5000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
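
/* Timing sketch (illustrative comment only): with NVRAM_CMD_TIMEOUT of
 * 5000 iterations and usleep_range(10, 40) per pass, tg3_nvram_exec_cmd()
 * polls for NVRAM_CMD_DONE for roughly 50-200 ms before giving up with
 * -EBUSY.
 */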
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
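
/* Worked example (illustrative comment only), assuming the 264-byte Atmel
 * AT45DB0X1B page size this translation exists for: logical byte offset
 * 1000 lands in page 1000 / 264 = 3 at byte 1000 % 264 = 208, so the
 * physical address becomes (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208 = 0x6d0
 * when the page position is 9.  tg3_nvram_logical_addr() inverts exactly
 * this mapping.
 */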
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);

	if (!res)
		*val = cpu_to_be32(v);

	return res;
}
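
/* Usage sketch (illustrative only): because tg3_nvram_read_be32() returns
 * bytestream (big-endian) data, a caller can copy NVRAM into a byte buffer
 * without further swapping.  The helper name is hypothetical and error
 * handling is kept minimal on purpose.
 */
#if 0
static int example_nvram_dump(struct tg3 *tp, u32 offset, u8 *buf, u32 len)
{
	u32 i;

	for (i = 0; i < len; i += 4) {
		__be32 v;
		int err = tg3_nvram_read_be32(tp, offset + i, &v);

		if (err)
			return err;
		memcpy(buf + i, &v, 4);	/* already in bytestream order */
	}
	return 0;
}
#endif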
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments. Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length. Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
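
/* Worked example (illustrative comment only), assuming a 12-byte header
 * (three u32 fields: version, base_addr, len): for a non fragmented blob
 * with tp->fw->size == 0x1000, the data length is (0x1000 - 12) / 4 words.
 * For a fragmented blob (main header len == 0xffffffff), a fragment whose
 * header says len == 0x20c carries (0x20c - 12) / 4 = 128 words.
 */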
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}

/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}

static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}

/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
{
	u32 addr_high, addr_low;

	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
		    (mac_addr[4] <<  8) | mac_addr[5]);

	if (index < 4) {
		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
	} else {
		index -= 4;
		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
	}
}
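
/* Worked example (illustrative comment only): for MAC 00:10:18:ab:cd:ef,
 * addr_high = (0x00 << 8) | 0x10 = 0x0010 and
 * addr_low  = (0x18 << 24) | (0xab << 16) | (0xcd << 8) | 0xef
 *           = 0x18abcdef, matching the MAC_ADDR_*_{HIGH,LOW} split above.
 */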
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high;
	int i;

	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 4; i < 16; i++)
			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
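
/* Worked example (illustrative comment only): the TX backoff seed is just
 * the sum of the six address octets masked with TX_BACKOFF_SEED_MASK, so
 * for 00:10:18:ab:cd:ef the sum is 0x28f and the seed is
 * 0x28f & TX_BACKOFF_SEED_MASK.
 */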
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}

static int tg3_setup_phy(struct tg3 *, bool);
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, false);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
	struct ethtool_eee eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return true;

	tg3_eee_pull_config(tp, &eee);

	if (tp->eee.eee_enabled) {
		if (tp->eee.advertised != eee.advertised ||
		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
			return false;
	} else {
		/* EEE is disabled but we're advertising */
		if (eee.advertised)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			netif_carrier_on(tp->dev);
		} else {
			netif_carrier_off(tp->dev);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset.  If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
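
/* A rough sketch of how the states above chain together in the common
 * autoneg case (see tg3_fiber_aneg_smachine() below for the details):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * The *_INIT states program MAC_TX_AUTO_NEG/MAC_MODE, and the matching
 * non-INIT state waits on the link partner, timing out against
 * ANEG_STATE_SETTLE_TIME via the link_time/cur_time counters.
 */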
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fall through */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fall through */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fall through */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u16 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	    !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	    (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
	}

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
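
/* Worked example of the ppb -> correction conversion above: a requested
 * adjustment of 1000000 ppb (1000 ppm) gives
 *
 *	correction = 1000000 * 2^24 / 10^9 = 16777
 *
 * so the 24-bit accumulator overflows roughly once every
 * 2^24 / 16777 ~= 1000 reference clocks, nudging the counter by one
 * extra unit about every thousandth tick, i.e. ~1000 ppm as requested.
 */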
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec64_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime64	= tg3_ptp_gettime,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
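
/* The ops table above is consumed by the PTP core.  As a sketch (the
 * actual hookup lives elsewhere in the driver), registration boils
 * down to something like:
 *
 *	tp->ptp_info = tg3_ptp_caps;
 *	tp->ptp_clock = ptp_clock_register(&tp->ptp_info, &tp->pdev->dev);
 *
 * after which user space sees a /dev/ptpN device whose operations land
 * in the tg3_ptp_* callbacks above.
 */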
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					   tp->ptp_adjust);
}

/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
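
/* Worked example, assuming a 512-entry ring (TG3_TX_RING_SIZE == 512)
 * and tx_pending == 511: with tx_prod == 5 and tx_cons == 510 the
 * producer has wrapped, and (5 - 510) & 511 == 7 descriptors are in
 * flight, leaving 511 - 7 == 504 available.  The mask only works
 * because the ring size is a power of two.
 */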
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
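
/* The smp_mb() in tg3_tx() pairs with a matching barrier on the
 * transmit side.  A minimal sketch of the producer-side pattern (not
 * the literal tg3_start_xmit() code):
 *
 *	netif_tx_stop_queue(txq);
 *	smp_mb();
 *	if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *		netif_tx_wake_queue(txq);
 *
 * Without the pairing, a tx_cons update could slip between the
 * availability check and the queue stop, leaving the queue stopped
 * even though the ring has drained.
 */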
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
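
/* For a feel of the size math above: with a standard 1500-byte MTU the
 * standard ring maps on the order of 1536 data bytes, and adding
 * TG3_RX_OFFSET() plus the aligned skb_shared_info still lands well
 * under PAGE_SIZE on 4K-page systems, so the page-frag path is taken
 * and *frag_size is recorded for a later build_skb().  Only jumbo
 * buffers (TG3_RX_JMB_MAP_SZ) normally spill into the kmalloc() path.
 * (Figures are illustrative, not exact.)
 */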
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
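
/* Concretely: if both the standard and jumbo rings are posted, a
 * 1200-byte arrival fits the standard ring's MAXLEN and is matched to
 * a standard buffer, while a 7000-byte arrival walks past it to the
 * jumbo TG3_BDINFO.  The opaque cookie the host placed in the posted
 * descriptor comes back in the status ring entry, which is how tg3_rx()
 * below finds the original ring_info.  (Sizes are illustrative.)
 */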
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u64 tstamp = 0;
		u8 *data;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
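
/* Illustrative sketch (not part of the driver): tg3 ring indices wrap by
 * masking with (ring size - 1), which is why tg3_rx() above advances
 * sw_idx with "sw_idx &= tp->rx_ret_ring_mask" rather than a modulo.
 * tg3_example_ring_distance() is a hypothetical helper showing how many
 * entries separate a consumer index from a producer index under that
 * scheme; it assumes a power-of-two ring size.
 */
static inline u32 tg3_example_ring_distance(u32 cons, u32 prod, u32 mask)
{
	/* Works for either cons/prod ordering because the subtraction
	 * wraps modulo the (power-of-two) ring size.
	 */
	return (prod - cons) & mask;
}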
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
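
/* Illustrative sketch (not part of the driver): tg3_rx_prodring_xfer()
 * copies at most the contiguous run between the consumer index and the
 * end of the ring, leaving a wrap to the next pass of the while loop.
 * tg3_example_contig_run() is a hypothetical helper that mirrors the
 * cpycnt computation above for a ring of size (mask + 1).
 */
static inline u32 tg3_example_contig_run(u32 cons, u32 prod, u32 mask)
{
	if (cons == prod)
		return 0;		/* nothing to copy */
	if (cons < prod)
		return prod - cons;	/* single contiguous run */
	return mask + 1 - cons;		/* run up to the wrap point */
}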
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
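
/* Illustrative sketch (not part of the driver): a typical caller brackets
 * a configuration change with tg3_full_lock()/tg3_full_unlock(), passing
 * non-zero irq_sync only when the IRQ handlers must be quiesced too (for
 * example around a chip reset).  tg3_example_reconfig() is hypothetical.
 */
static void tg3_example_reconfig(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* also synchronizes the IRQ handlers */
	/* ... touch hardware state that the ISRs must not race with ... */
	tg3_full_unlock(tp);
}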
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}
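
/* Worked example of the test above: with mapping = 0xfffffff8 and len = 16,
 * base + len + 8 is 0x100000010, which truncates to 0x00000010 in 32-bit
 * arithmetic; 0x10 < base, so the buffer is flagged as crossing a 4GB
 * boundary.  The "+ 8" appears to add a small guard margin so a buffer
 * ending right at the boundary is flagged as well.
 */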
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
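
/* Illustrative sketch (not part of the driver): the descriptor above splits
 * a 64-bit DMA address into two 32-bit halves because the hardware BD stores
 * addr_hi and addr_lo separately.  tg3_example_bd_addr() is a hypothetical
 * helper reassembling the address, e.g. for debugging.
 */
static inline u64 tg3_example_bd_addr(const struct tg3_tx_buffer_desc *txbd)
{
	return ((u64) txbd->addr_hi << 32) | txbd->addr_lo;
}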
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
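
/* Illustrative sketch (not part of the driver): when tp->dma_limit is set,
 * tg3_tx_frag_set() emits ceil(len / dma_limit) descriptors; the rebalancing
 * of a would-be trailing piece of 8 bytes or less changes the sizes of the
 * last two pieces but not their count.  tg3_example_frag_count() is a
 * hypothetical helper computing the descriptor count produced by the loop
 * above, assuming the budget does not run out.
 */
static inline u32 tg3_example_frag_count(u32 len, u32 dma_limit)
{
	if (!dma_limit || len <= dma_limit)
		return 1;
	return (len + dma_limit - 1) / dma_limit;	/* ceiling division */
}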
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u32 tcp_opt_len, hdr_len;

		if (skb_cow_head(skb, 0))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		/* HW/FW can not correctly segment packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (tg3_tso_bug_gso_check(tnapi, skb))
				return tg3_tso_bug(tp, tnapi, txq, skb);
			goto drop;
		}

		if (!skb_is_gso_v6(skb)) {
			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			    tg3_flag(tp, TSO_BUG)) {
				if (tg3_tso_bug_gso_check(tnapi, skb))
					return tg3_tso_bug(tp, tnapi, txq, skb);
				goto drop;
			}
			ip_csum = iph->check;
			ip_tot_len = iph->tot_len;
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcph = tcp_hdr(skb);
		tcp_csum = tcph->check;

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcph->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else {
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		}

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW can not correctly checksum packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (skb_checksum_help(skb))
				goto drop;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (skb_vlan_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = skb_vlan_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card. */
		tw32_tx_mbox(tnapi->prodmbox, entry);
		mmiowb();
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
						    TG3_RX_RCB_RING_BYTES(tp),
						    &tnapi->rx_rcb_mapping,
						    GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
	tg3_full_lock(tp, 0);
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
	tg3_full_unlock(tp);
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
					   sizeof(struct tg3_hw_stats),
					   &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
						       TG3_HW_STATUS_SIZE,
						       &tnapi->status_mapping,
						       GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
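
/* Illustrative summary (not part of the driver): under RSS the four rx
 * return ring producer indices live at different offsets in the shared
 * status block, as the switch above shows; vector 0 gets no rx return
 * ring at all.  Restating that mapping for reference:
 *
 *	vector 1 -> sblk->idx[0].rx_producer
 *	vector 2 -> sblk->rx_jumbo_consumer   (repurposed field)
 *	vector 3 -> sblk->reserved            (repurposed field)
 *	vector 4 -> sblk->rx_mini_consumer    (repurposed field)
 */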
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}
9040 /* tp->lock is held. */
9041 static int tg3_chip_reset(struct tg3
*tp
)
9042 __releases(tp
->lock
)
9043 __acquires(tp
->lock
)
9046 void (*write_op
)(struct tg3
*, u32
, u32
);
9049 if (!pci_device_is_present(tp
->pdev
))
9054 tg3_ape_lock(tp
, TG3_APE_LOCK_GRC
);
9056 /* No matching tg3_nvram_unlock() after this because
9057 * chip reset below will undo the nvram lock.
9059 tp
->nvram_lock_cnt
= 0;
9061 /* GRC_MISC_CFG core clock reset will clear the memory
9062 * enable bit in PCI register 4 and the MSI enable bit
9063 * on some chips, so we save relevant registers here.
9065 tg3_save_pci_state(tp
);
9067 if (tg3_asic_rev(tp
) == ASIC_REV_5752
||
9068 tg3_flag(tp
, 5755_PLUS
))
9069 tw32(GRC_FASTBOOT_PC
, 0);
9072 * We must avoid the readl() that normally takes place.
9073 * It locks machines, causes machine checks, and other
9074 * fun things. So, temporarily disable the 5701
9075 * hardware workaround, while we do the reset.
9077 write_op
= tp
->write32
;
9078 if (write_op
== tg3_write_flush_reg32
)
9079 tp
->write32
= tg3_write32
;
9081 /* Prevent the irq handler from reading or writing PCI registers
9082 * during chip reset when the memory enable bit in the PCI command
9083 * register may be cleared. The chip does not generate interrupt
9084 * at this time, but the irq handler may still be called due to irq
9085 * sharing or irqpoll.
9087 tg3_flag_set(tp
, CHIP_RESETTING
);
9088 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
9089 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
9090 if (tnapi
->hw_status
) {
9091 tnapi
->hw_status
->status
= 0;
9092 tnapi
->hw_status
->status_tag
= 0;
9094 tnapi
->last_tag
= 0;
9095 tnapi
->last_irq_tag
= 0;
9099 tg3_full_unlock(tp
);
9101 for (i
= 0; i
< tp
->irq_cnt
; i
++)
9102 synchronize_irq(tp
->napi
[i
].irq_vec
);
9104 tg3_full_lock(tp
, 0);
9106 if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
9107 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
9108 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
9112 val
= GRC_MISC_CFG_CORECLK_RESET
;
9114 if (tg3_flag(tp
, PCI_EXPRESS
)) {
9115 /* Force PCIe 1.0a mode */
9116 if (tg3_asic_rev(tp
) != ASIC_REV_5785
&&
9117 !tg3_flag(tp
, 57765_PLUS
) &&
9118 tr32(TG3_PCIE_PHY_TSTCTL
) ==
9119 (TG3_PCIE_PHY_TSTCTL_PCIE10
| TG3_PCIE_PHY_TSTCTL_PSCRAM
))
9120 tw32(TG3_PCIE_PHY_TSTCTL
, TG3_PCIE_PHY_TSTCTL_PSCRAM
);
9122 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
) {
9123 tw32(GRC_MISC_CFG
, (1 << 29));
9128 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
9129 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
9130 tw32(GRC_VCPU_EXT_CTRL
,
9131 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
9134 /* Set the clock to the highest frequency to avoid timeouts. With link
9135 * aware mode, the clock speed could be slow and bootcode does not
9136 * complete within the expected time. Override the clock to allow the
9137 * bootcode to finish sooner and then restore it.
9139 tg3_override_clk(tp
);
9141 /* Manage gphy power for all CPMU absent PCIe devices. */
9142 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, CPMU_PRESENT
))
9143 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
9145 tw32(GRC_MISC_CFG
, val
);
9147 /* restore 5701 hardware bug workaround write method */
9148 tp
->write32
= write_op
;
9150 /* Unfortunately, we have to delay before the PCI read back.
9151 * Some 575X chips even will not respond to a PCI cfg access
9152 * when the reset command is given to the chip.
9154 * How do these hardware designers expect things to work
9155 * properly if the PCI write is posted for a long period
9156 * of time? It is always necessary to have some method by
9157 * which a register read back can occur to push the write
9158 * out which does the reset.
9160 * For most tg3 variants the trick below was working.
9165 /* Flush PCI posted writes. The normal MMIO registers
9166 * are inaccessible at this time so this is the only
9167 * way to make this reliably (actually, this is no longer
9168 * the case, see above). I tried to use indirect
9169 * register read/write but this upset some 5701 variants.
9171 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
9175 if (tg3_flag(tp
, PCI_EXPRESS
) && pci_is_pcie(tp
->pdev
)) {
9178 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A0
) {
9182 /* Wait for link training to complete. */
9183 for (j
= 0; j
< 5000; j
++)
9186 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
9187 pci_write_config_dword(tp
->pdev
, 0xc4,
9188 cfg_val
| (1 << 15));
9191 /* Clear the "no snoop" and "relaxed ordering" bits. */
9192 val16
= PCI_EXP_DEVCTL_RELAX_EN
| PCI_EXP_DEVCTL_NOSNOOP_EN
;
9194 * Older PCIe devices only support the 128 byte
9195 * MPS setting. Enforce the restriction.
9197 if (!tg3_flag(tp
, CPMU_PRESENT
))
9198 val16
|= PCI_EXP_DEVCTL_PAYLOAD
;
9199 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_DEVCTL
, val16
);
9201 /* Clear error status */
9202 pcie_capability_write_word(tp
->pdev
, PCI_EXP_DEVSTA
,
9203 PCI_EXP_DEVSTA_CED
|
9204 PCI_EXP_DEVSTA_NFED
|
9205 PCI_EXP_DEVSTA_FED
|
9206 PCI_EXP_DEVSTA_URD
);
	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |=
					TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
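/* Note (informational): the reprobe above re-reads the NIC SRAM
 * configuration because a chip reset can change the ASF firmware state;
 * the NIC_SRAM_DATA_SIG_MAGIC check guards against interpreting an
 * uninitialized SRAM image as valid configuration.
 */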
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
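/* A TG3_BDINFO block in NIC SRAM is a small fixed-layout record; the
 * offsets below follow from the TG3_BDINFO_* constants used above:
 *
 *	+0x0	host DMA address, high 32 bits
 *	+0x4	host DMA address, low 32 bits
 *	+0x8	(max ring length << 16) | ring attribute flags
 *	+0xc	NIC SRAM address of the descriptors (pre-5705 only)
 */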
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
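/* Watermark math recap: the chip starts refilling its on-chip BD cache
 * once occupancy falls below the programmed threshold, so the threshold
 * is capped at half the cache size and at one eighth of the host ring's
 * pending count, whichever is smaller.
 */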
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
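/* calc_crc() is the standard little-endian CRC-32 (polynomial
 * 0xedb88320) used for Ethernet.  __tg3_set_rx_mode() below feeds each
 * multicast address through it to pick one of 128 hash-filter bits:
 *
 *	crc    = calc_crc(ha->addr, ETH_ALEN);
 *	bit    = ~crc & 0x7f;		// 7-bit hash index, 0..127
 *	regidx = (bit & 0x60) >> 5;	// one of MAC_HASH_REG_0..3
 *	bit   &= 0x1f;			// bit within that register
 */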
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the MAC address filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
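/* ethtool_rxfh_indir_default(i, qcnt) spreads the table round-robin
 * (effectively i % qcnt), so with e.g. qcnt == 4 the 128-entry table
 * becomes 0,1,2,3,0,1,2,3,...
 */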
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
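/* Packing note: each 32-bit MAC_RSS_INDIR_TBL_x register holds eight
 * 4-bit indirection entries, most significant nibble first, which is
 * why the loop above shifts val left by 4 per entry and writes one
 * register per group of eight.
 */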
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled.
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10 ; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:
	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}

/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature * 1000);
}
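/* hwmon reports temperatures in millidegrees Celsius, hence the * 1000
 * above; the APE scratchpad value is presumably in whole degrees.
 */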
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);

static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#else
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
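/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit
 * software accumulator.  The carry test relies on unsigned wraparound:
 * after low += __val, (low < __val) holds exactly when the 32-bit
 * addition overflowed, in which case the high word is bumped.
 */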
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
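/* The check above is a heuristic: if a vector reports pending work but
 * its consumer indices have not advanced since the previous tick, the
 * MSI was most likely lost, so the handler is invoked by hand to
 * unwedge the ring.
 */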
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
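/* Only the legacy INTx path requests the vector with IRQF_SHARED: an
 * INTx line may be shared with other devices, while MSI and MSI-X
 * vectors are exclusive to this function and need no sharing.
 */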
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
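/* Loads the chip firmware blob via the firmware loader and sanity
 * checks its header: the length field counts the full image including
 * BSS, so it must be at least the file size minus the header.
 */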
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
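/* Worked example of the vector count below: with rxq_cnt = 4 and
 * txq_cnt = 1, irq_cnt starts at max(4, 1) = 4 and, because more than
 * one vector is wanted, becomes min(4 + 1, irq_max) - the extra vector
 * is vector 0, which services link and other non-ring interrupts.
 */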
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
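/* Interrupt setup ladder: try MSI-X (multivector, enables RSS/TSS),
 * fall back to single-vector MSI, and finally to legacy INTx.  Chips
 * claiming MSI support without tagged status skip MSI entirely.
 */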
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
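/* Common bring-up path shared by open and reconfiguration: interrupts
 * are configured first so the NAPI/ring allocation below knows how
 * many vectors exist, then rings are allocated, IRQs requested, the
 * hardware initialized, and only after an optional MSI self-test do
 * the timer, interrupts and TX queues come alive.
 */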
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
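/* ndo_open handler.  Firmware is (re)requested here rather than at
 * probe time; if the firmware file is missing the driver degrades
 * gracefully, dropping EEE on 57766 parts or TSO on older parts
 * instead of failing the open (except on 5701 A0, which cannot run
 * without it).
 */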
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to close device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	tg3_stop(tp);

	if (pci_device_is_present(tp->pdev)) {
		tg3_power_down_prepare(tp);

		tg3_carrier_off(tp);
	}
	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
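/* On 5700/5701 with a copper PHY the MAC's rx_fcs_errors counter is
 * not used; the CRC error count is read from the PHY test registers
 * instead (presumably because the MAC counter is unreliable there) and
 * accumulated in software in tp->phy_crc_errors.
 */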
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
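/* For example, ESTAT_ADD(rx_octets) expands to:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool counter is the snapshot saved before the last
 * chip reset plus the live hardware counter, so values stay monotonic
 * across resets.
 */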
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					      CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}
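/* NVRAM writes must be whole, 4-byte-aligned words.  A write that
 * starts or ends off a word boundary is widened: the neighbouring
 * words are read first and merged with the user data in a bounce
 * buffer, then the whole aligned span is written back.
 */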
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start = 0, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 supported, advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		phy_ethtool_ksettings_get(phydev, cmd);

		return 0;
	}

	supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		supported |= (SUPPORTED_1000baseT_Half |
			      SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		supported |= (SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_TP);
		cmd->base.port = PORT_TP;
	} else {
		supported |= SUPPORTED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				advertising |= ADVERTISED_Pause;
			} else {
				advertising |= ADVERTISED_Pause |
					ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			advertising |= ADVERTISED_Asym_Pause;
		}
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	if (netif_running(dev) && tp->link_up) {
		cmd->base.speed = tp->link_config.active_speed;
		cmd->base.duplex = tp->link_config.active_duplex;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising,
			tp->link_config.rmt_adv);

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
	}

	cmd->base.phy_address = tp->phy_addr;
	cmd->base.autoneg = tp->link_config.autoneg;
	return 0;
}
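/* Backs "ethtool -s", e.g. "ethtool -s eth0 speed 100 duplex full
 * autoneg off".  Validation below rejects advertisement bits the PHY
 * cannot support and forced modes that make no sense for the port
 * type (SERDES ports only allow forced 1000/full).
 */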
static int tg3_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_set(phydev, cmd);
	}

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    cmd->base.duplex != DUPLEX_FULL &&
	    cmd->base.duplex != DUPLEX_HALF)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->base.duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->base.autoneg;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->base.duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, false);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
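/* Maps the requested rx/tx pause bits onto 802.3 autoneg advertisement
 * bits before restarting autoneg:
 *
 *	rx=1 tx=1  ->  Pause
 *	rx=1 tx=0  ->  Pause | Asym_Pause
 *	rx=0 tx=1  ->  Asym_Pause
 *	rx=0 tx=0  ->  (nothing advertised)
 */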
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}

static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}

static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
			const u8 hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				j += 2;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				j += 3;
			} else {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			}

			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
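/* Each table entry below gives a register offset, applicability flags,
 * a mask of read-only bits and a mask of read/write bits.  The test
 * writes all-zeros and then all-ones through the masks and checks that
 * read-only bits never change while read/write bits take the written
 * value; the original register content is restored afterwards.
 */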
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
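/* Runs one loopback exchange: a frame addressed to the NIC's own MAC
 * is transmitted with a counting byte pattern (i & 0xff) in the
 * payload, then the RX return ring is polled and every payload byte is
 * compared on the way back.  In TSO mode the canned IP/TCP header
 * above is prepended and the received segments must also carry a good
 * hardware checksum.
 */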
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
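/* Drives the loopback tests at up to three levels: internal MAC
 * loopback (skipped on 5780 due to an erratum and on newer CPMU
 * chips, where it is deprecated), internal PHY loopback, and
 * optionally external loopback through a plugged-in loopback
 * connector, each with standard, TSO and jumbo frame sizes as
 * applicable.
 */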
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
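/* From user space this suite is typically driven through the ethtool
 * self-test path below, e.g. "ethtool -t eth0 offline", with
 * "ethtool -t eth0 external_lb" additionally exercising the external
 * loopback leg (a usage sketch, not the only way to invoke it).
 */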
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);
}
static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
		tg3_flag_set(tp, TX_TSTAMP_EN);
	else
		tg3_flag_clear(tp, TX_TSTAMP_EN);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
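/* A minimal user-space sketch of driving this handler through the
 * standard timestamping ioctl (hypothetical fd and interface name,
 * error handling omitted):
 *
 *	struct hwtstamp_config cfg;
 *	struct ifreq ifr;
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On success the (possibly adjusted) config is copied back to user
 * space, mirroring the copy_to_user() above.
 */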
static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	stmpconf.flags = 0;
	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

	switch (tp->rxptpctl) {
	case 0:
		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fall through */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
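/* These limits back the standard ethtool coalescing knobs, so a
 * user-space request such as "ethtool -C eth0 rx-usecs 30 rx-frames 64"
 * (an illustrative invocation) is range-checked against the MAX_ and
 * MIN_ constants above before being written to tp->coal.
 */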
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}
static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
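/* Example of the sizing loop above: the magic signature lives at offset
 * 0, so probing 0x10, 0x20, 0x40, ... returns ordinary data until the
 * offset exceeds the part's capacity and the addressing wraps; the first
 * probe that reads back the signature again is therefore the chip size.
 */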
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
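/* Worked example of the swab16() above (illustrative numbers only): if
 * the 16-bit value read back is 0x0001, swab16(0x0001) == 0x0100 == 256,
 * so tp->nvram_size becomes 256 * 1024 bytes, i.e. a 256 KB device.
 */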
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		break;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
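			/* This repacks the raw OUI/model/revision words into
			 * the driver's internal PHY id layout -- the same
			 * packing tg3_phy_probe() later applies to the
			 * MII_PHYSID1/MII_PHYSID2 registers -- so the result
			 * can be compared directly against the TG3_PHY_ID_*
			 * constants.
			 */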
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
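/* The loop above polls the status register 100 times with a 10 usec
 * delay between reads, which is where the "up to 1 ms" figure in the
 * comment comes from (100 * 10 usec == 1 msec).
 */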
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
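/* Merge example: if the first read returns thalf_otp == 0xAAAABBBB and
 * the second returns bhalf_otp == 0xCCCCDDDD, the result is
 * ((0xAAAABBBB & 0xffff) << 16) | (0xCCCCDDDD >> 16) == 0xBBBBCCCC,
 * i.e. the two 16-bit halves that straddle the alignment boundary.
 */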
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;
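		/* MII_BMSR latches link-down events, so it is read twice:
		 * the first read clears any stale latched status, the
		 * second reflects the current link state.
		 */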
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
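/* A firmware image in NVRAM is trusted only if its header carries the
 * expected 0x0c000000 signature bits; tg3_read_mgmtfw_ver() uses this
 * check before following directory entries into the image.
 */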
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
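/* Self-boot NVRAM images encode major/minor/build in an EDH word whose
 * offset depends on the format revision; a nonzero build number maps to
 * a letter suffix ('a' for build 1, and so on), yielding strings of the
 * illustrative form "sb v1.05a".
 */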
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
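/* Before trusting any APE feature bits, verify the APE shared-memory
 * signature and require the firmware-ready status bit; only then is the
 * NCSI feature flag sampled.
 */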
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
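/* Version discovery dispatches on the NVRAM magic word: a full legacy
 * image yields a bootcode version, self-boot and hardware self-boot
 * images each have their own encoding, and management firmware
 * (ASF/APE) versions are appended afterwards.
 */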
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
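/* The chip revision normally lives in the top bits of the misc host
 * control register; devices that report ASIC_REV_USE_PROD_ID_REG keep
 * the real ID in a generation-specific product-ID config register
 * instead, selected by PCI device ID below.
 */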
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
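/* tg3_get_invariants() is the one-time probe-side characterization of
 * the device: bus type (PCI/PCI-X/PCIe), DMA and mailbox-write errata,
 * TSO capabilities, register access methods, and GPIO/power defaults
 * are all decided here, before the first fast-path MMIO access.
 */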
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
	}

	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == ETH_ALEN) {
		memcpy(dev->dev_addr, addr, ETH_ALEN);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
	return 0;
}
#endif
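/* MAC address discovery tries sources in decreasing order of trust:
 * OpenFirmware property (sparc), SSB ROM, the SRAM mailbox written by
 * bootcode, NVRAM at a function-dependent offset, and finally the live
 * MAC address registers.
 */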
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
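/* DMA boundary tuning: the "goal" below selects whether bursts should
 * be broken at single or multiple cache lines, a per-architecture
 * choice; the switch statements then translate (goal, cacheline size)
 * into DMA_RWCTRL boundary bits for PCI, PCI-X, and PCIe.
 */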
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
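/* tg3_test_dma() fills a coherent buffer with a counting pattern, DMAs
 * it to chip SRAM and back at the maximum write burst size, and falls
 * back to a conservative 16-byte write boundary if the data comes back
 * corrupted (the 5700/5701 write DMA bug).
 */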
#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
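/* Buffer manager watermarks come in three families: 57765+ parts,
 * other 5705+ parts (with a 5906 override), and the original 5700
 * register layout; jumbo watermarks are set alongside each.
 */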
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
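/* tg3_init_one() is the PCI probe entry point: enable the device, map
 * BAR 0 (and BAR 2 for APE-enabled parts), fetch the chip invariants,
 * then size the DMA masks before any ring allocation.
 */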
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}
	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}
	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);
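
	/* For reference: DMA_BIT_MASK(n) is ((1ULL << (n)) - 1) for
	 * n < 64 and ~0ULL for n == 64, so the masks chosen above are
	 * 0xffffffff, 0xffffffffff, or all-ones.  The streaming mask
	 * (dma_mask) and the coherent mask (persist_dma_mask) may
	 * differ: with CONFIG_HIGHMEM the 40-bit-bug parts still
	 * advertise 64-bit streaming DMA and rely on the address check
	 * in tg3_start_xmit().
	 */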
	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
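
	/* TSO ends up in dev->hw_features below, so it remains
	 * user-togglable after probe, e.g. (illustrative):
	 *   ethtool -K eth0 tso on tso6 off
	 */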
	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}
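
	/* With these bounds set, the net core rejects out-of-range MTU
	 * requests itself, so e.g. "ip link set dev eth0 mtu 9000"
	 * succeeds only on JUMBO_CAPABLE hardware (see TG3_MAX_MTU()).
	 */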
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
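
	/* Note on the strides above: vector 0 keeps its mailbox values
	 * (they double for single-vector mode), while each later
	 * iteration advances rcvmbx by one 8-byte register and
	 * zig-zags sndmbx (-0x4/+0xc) so tx producer mailboxes stay
	 * packed at 4-byte granularity.
	 */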
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly.  The DMA self test below enables WDMAC, and we
	 * would otherwise see (spurious) pending DMA on the PCI bus at
	 * that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);
	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);

		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}
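
	/* On success ptp_clock_register() exposes the PTP hardware
	 * clock to userspace as /dev/ptpN; "ethtool -T <iface>" reports
	 * the clock index so tools such as linuxptp can synchronize
	 * against it.
	 */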
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}
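
/* The labels above follow the usual kernel goto-cleanup idiom:
 * resources are released in strict reverse order of acquisition, and
 * each failure site jumps to the label that undoes everything acquired
 * so far.
 */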
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */
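
/* Pairing: tg3_suspend() halts the chip and clears INIT_COMPLETE, so
 * resume must fully re-initialize via tg3_restart_hw().  Both paths
 * run under rtnl_lock() because they reconfigure the netdev.  A system
 * suspend cycle ("echo mem > /sys/power/state") exercises both.
 */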
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* A frozen channel is a recoverable error; note that recovery
	 * is now in progress.  Permanent failures are handled below.
	 */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
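
/* The PCI error-recovery core invokes these callbacks in order:
 * error_detected() -> (link/slot reset) -> slot_reset() -> resume().
 * Returning PCI_ERS_RESULT_NEED_RESET from error_detected() requests
 * the reset; PCI_ERS_RESULT_DISCONNECT abandons the device instead.
 */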
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
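
/* module_pci_driver() expands to the module_init()/module_exit()
 * boilerplate that registers and unregisters tg3_driver with the PCI
 * core, so no explicit init/exit functions are needed.
 */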