/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include <asm/idprom.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
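
/* Illustrative expansion: tg3_flag(tp, ENABLE_APE) becomes
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the per-device flag bitmap.
 */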
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			136
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"Jan 03, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
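
/* Because TG3_TX_RING_SIZE is a power of two, the AND above implements
 * the ring wrap without a divide: e.g. NEXT_TX(511) == 0 on the
 * 512-entry ring.
 */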
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
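
/* E.g. with the default TG3_DEF_TX_RING_PENDING of 511 pending
 * descriptors, the queue is woken once 127 descriptors are free.
 */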
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
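
/* The PCI core matches probed devices against tg3_pci_tbl above; the
 * .driver_data flags mark 10/100-only parts so the probe path can trim
 * the advertised link modes accordingly.
 */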
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },

	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },

	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
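
/* These are the strings "ethtool -t <dev>" reports; the designated
 * initializers keep this key array in step with the TG3_*_TEST result
 * indices defined above.
 */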
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
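
/* Note the indirection: all register traffic funnels through the
 * tp->write32/read32 function pointers, so the indirect, flush-on-write
 * and mailbox variants above can be swapped in per chip quirk at probe
 * time without touching any caller.
 */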
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int ret = 0;
	int i, off;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
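
/* The two helpers above implement the driver side of the APE event
 * handshake: both poll TG3_APE_EVENT_STATUS in 10 usec steps, so e.g.
 * a 30000 usec budget passed to tg3_ape_wait_for_event() allows up to
 * 3000 polls before giving up.
 */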
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable()
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000
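
/* Given the ~10 usec poll cadence used in the loops below, this budget
 * caps one MDIO transaction at roughly 50 ms before it is abandoned
 * with -EBUSY.
 */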
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
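
/* Resolution of 802.3x pause for 1000BASE-X, summarized:
 *
 *   PAUSE advertised by both sides                -> TX + RX flow control
 *   ASYM on both sides, PAUSE only on local side  -> RX only
 *   ASYM on both sides, PAUSE only on remote side -> TX only
 *   anything else                                 -> no flow control
 */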
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}

static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~(0x3000);
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return 0;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))

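/* Each PCI function owns a 4-bit lane in this shared GPIO message word,
 * so the status for function n lives at bit offset 4 * n (shifted up by
 * TG3_APE_GPIO_MSG_SHIFT when the word is kept in the APE scratchpad,
 * as tg3_set_function_status() below does).  For example, function 2
 * reporting "driver present" sets TG3_GPIO_MSG_DRVR_PRES << 8, and the
 * ALL_*_MASK macros simply OR the per-function lanes together.
 */
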
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}

static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static bool tg3_phy_led_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

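/* Lock/unlock calls nest: only the outermost tg3_nvram_lock() requests
 * the hardware arbitration grant (SWARB_GNT1), and only the matching
 * outermost tg3_nvram_unlock() releases it.  A typical caller, with
 * tp->lock held, brackets its NVRAM access as tg3_nvram_read() does
 * below:
 *
 *	if (tg3_nvram_lock(tp))
 *		return -ENODEV;
 *	tg3_enable_nvram_access(tp);
 *	... NVRAM reads/writes ...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 */
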
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

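/* Worked example of the Atmel translation above, assuming the
 * 264-byte AT45DB0X1B page size these parts use and
 * ATMEL_AT45DB0X1B_PAGE_POS = 9: logical offset 1000 falls in page
 * 1000 / 264 = 3 at byte 1000 % 264 = 208, so the physical address is
 * (3 << 9) + 208 = 1744.  tg3_nvram_logical_addr() inverts this:
 * (1744 >> 9) * 264 + (1744 & 511) = 3 * 264 + 208 = 1000.
 */
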
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}

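/* Per the NOTE above, callers that treat NVRAM contents as a byte
 * stream (copying a firmware image or MAC address out of flash, say)
 * want tg3_nvram_read_be32(): cpu_to_be32() makes the in-memory byte
 * order match the on-flash byte order regardless of host endianness.
 * A caller that wants the 32-bit value itself uses tg3_nvram_read().
 */
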
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}

static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments. Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length. Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}

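/* Layout implied by the description above for a fragmented image
 * (each header is a struct tg3_firmware_hdr of TG3_FW_HDR_LEN bytes):
 *
 *	+------------------------------+
 *	| main hdr:  version           |
 *	|            base_addr         |
 *	|            len = 0xffffffff  |
 *	+------------------------------+
 *	| frag hdr:  len = hdr + data  |
 *	| fragment data ...            |
 *	+------------------------------+
 *	| frag hdr:  len = hdr + data  |
 *	| fragment data ...            |
 *	+------------------------------+
 */
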
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}

/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}

static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}

/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}

/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}

/* tp->lock is held. */
static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
{
	u32 addr_high, addr_low;

	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
		    (mac_addr[4] <<  8) | mac_addr[5]);

	if (index < 4) {
		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
	} else {
		index -= 4;
		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
	}
}

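/* Packing example for the code above: the address 00:10:18:aa:bb:cc
 * is written as addr_high = 0x00000010 (top two octets) and
 * addr_low = 0x18aabbcc (remaining four octets), i.e. the MAC is
 * stored big-endian across the HIGH/LOW register pair.
 */
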
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high;
	int i;

	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 4; i < 16; i++)
			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		    TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}

static int tg3_setup_phy(struct tg3 *, bool);

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[tp->phy_addr];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, false);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

	return 0;
}

static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}

static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}

static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}

static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
	struct ethtool_eee eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return true;

	tg3_eee_pull_config(tp, &eee);

	if (tp->eee.eee_enabled) {
		if (tp->eee.advertised != eee.advertised ||
		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
			return false;
	} else {
		/* EEE is disabled but we're advertising */
		if (eee.advertised)
			return false;
	}

	return true;
}

static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}

static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			netif_carrier_on(tp->dev);
		} else {
			netif_carrier_off(tp->dev);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) && eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset.  If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK	10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
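/* The software state machine below implements 1000BASE-X style
 * autonegotiation (IEEE 802.3 clause 37) by hand: it transmits and
 * samples config code words via MAC_TX_AUTO_NEG/MAC_RX_AUTO_NEG and
 * walks restart -> ability detect -> ack detect -> complete ack ->
 * idle detect -> link OK, falling back to ANEG_STATE_AN_ENABLE whenever
 * the partner's ability word disappears.
 */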
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
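/* With the udelay(1) in the polling loop above, the 195000-tick bound
 * caps a single fiber_autoneg() attempt at roughly 195 ms of busy
 * waiting before the state machine is declared failed.
 */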
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
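/* The two busy-wait loops above (500 and 15000 iterations of udelay(10))
 * give the BCM8002 about 5 ms to come out of reset and about 150 ms for
 * the receive signal to stabilize before the PHY ID can be read back.
 */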
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u16 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv = 0, remote_adv = 0, sgsr;

	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	    !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	    (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
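/* Illustrative sketch, not part of the upstream driver: the EAV reference
 * clock is a 64-bit nanosecond counter behind two 32-bit registers, so
 * writes split the value and reads reassemble it.  The hypothetical helper
 * below mirrors that split and merge.
 */
static u64 __maybe_unused tg3_example_split_refclk(u64 ns)
{
	u32 lsb = ns & 0xffffffff;	/* what tg3_refclk_write() puts in ..._LSB */
	u32 msb = ns >> 32;		/* what it puts in ..._MSB */

	/* tg3_refclk_read() recombines the halves the same way. */
	return (u64)msb << 32 | lsb;
}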
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
	}

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}
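/* Userspace reaches this through the ETHTOOL_GET_TS_INFO ioctl, e.g.
 * "ethtool -T ethX", which reports the timestamping modes and the PHC
 * index advertised here.
 */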
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
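/* Worked example of the conversion above: ppb = 1000 (one part per
 * million) gives correction = 1000 * 2^24 / 10^9 = 16 after integer
 * truncation.  Adding 16 to the 24-bit accumulator every clock makes it
 * overflow once per 2^24 / 16 = 1048576 clocks, i.e. the counter gains
 * (or loses, with the NEG bit) about 0.95 ppm, slightly under the
 * requested 1 ppm because of the truncation.
 */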
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
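/* Note that tg3_rd32_loop() offsets dst by 'off' bytes before copying,
 * so the register at offset (off + i) lands at regs[(off + i) / 4] and
 * the hex dump in tg3_dump_state() prints values at their true register
 * offsets.
 */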
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
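/* Example of the ring arithmetic above: with TG3_TX_RING_SIZE = 512,
 * tx_prod = 5 and tx_cons = 510 give (5 - 510) & 511 = 7 descriptors in
 * flight, so the masked subtraction handles producer wrap-around without
 * any conditionals.
 */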
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
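/* The netdev_tx_completed_queue() call above feeds Byte Queue Limits
 * (BQL): it reports how many packets and bytes finished so the stack can
 * bound how much data it lets tg3_start_xmit() queue at once.
 */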
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
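/* The skb_size computation above must mirror the one in
 * tg3_alloc_rx_data() below so that the frag-vs-kmalloc decision made at
 * allocation time is repeated exactly when the buffer is freed.
 */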
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6774 /* The RX ring scheme is composed of multiple rings which post fresh
6775 * buffers to the chip, and one special ring the chip uses to report
6776 * status back to the host.
6778 * The special ring reports the status of received packets to the
6779 * host. The chip does not write into the original descriptor the
6780 * RX buffer was obtained from. The chip simply takes the original
6781 * descriptor as provided by the host, updates the status and length
6782 * field, then writes this into the next status ring entry.
6784 * Each ring the host uses to post buffers to the chip is described
6785 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6786 * it is first placed into the on-chip ram. When the packet's length
6787 * is known, it walks down the TG3_BDINFO entries to select the ring.
6788 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6789 * which is within the range of the new packet's length is chosen.
6791 * The "separate ring for rx status" scheme may sound queer, but it makes
6792 * sense from a cache coherency perspective. If only the host writes
6793 * to the buffer post rings, and only the chip writes to the rx status
6794 * rings, then cache lines never move beyond shared-modified state.
6795 * If both the host and chip were to write into the same ring, cache line
6796 * eviction could occur since both entities want it in an exclusive state.
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
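/* Editorial note: the rmb() calls in tg3_rx() after each read of
 * *(tnapi->rx_rcb_prod_idx) order that read against the subsequent
 * reads of the descriptor contents (the "opaque cookie"), so a freshly
 * published producer index is never paired with stale descriptor data.
 */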
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}
static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}
static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}
static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp))
		netdev_err(dev, "transmit timed out, resetting\n");

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}
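/* Worked example (editorial): for mapping = 0xfffffff0 and len = 32,
 * base + len + 8 = 0x100000018 truncates to 0x18 in 32-bit arithmetic,
 * which is less than base, so the test correctly flags a buffer that
 * would straddle a 4GB boundary.
 */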
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
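/* Worked example (editorial): with tp->dma_limit = 4096 and a
 * 9000-byte segment, tg3_tx_frag_set() above emits two 4096-byte
 * descriptors and one 808-byte tail.  When the remainder after a full
 * chunk would be <= 8 bytes (the SHORT_DMA_BUG case), the chunk is
 * halved so that no descriptor shorter than 8 bytes is ever posted.
 */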
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
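/* Editorial note: tg3_tso_bug() avoids the hardware TSO engine
 * entirely by having the stack segment the packet in software
 * (skb_gso_segment() with NETIF_F_TSO masked out of the feature set)
 * and then pushing each resulting MTU-sized skb back through
 * tg3_start_xmit().  The gso_segs * 3 estimate is deliberately
 * pessimistic about descriptor usage.
 */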
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
						    TG3_RX_RCB_RING_BYTES(tp),
						    &tnapi->rx_rcb_mapping,
						    GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
					   sizeof(struct tg3_hw_stats),
					   &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
						       TG3_HW_STATUS_SIZE,
						       &tnapi->status_mapping,
						       GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
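/* Editorial note: in the RSS case above, vector 0 carries no rx return
 * ring, so vectors 1-4 reuse the four 16-bit slots that the status
 * block layout already provides (idx[0].rx_producer plus the legacy
 * jumbo/reserved/mini consumer fields).  The hardware's DMA layout of
 * the status block is unchanged; only the driver's interpretation of
 * those fields differs.
 */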
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts.  With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time.  Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |=
					TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
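/* Editorial note: the PCI_COMMAND config-space read in tg3_chip_reset()
 * is what actually pushes the posted GRC_MISC_CFG core-clock reset
 * write out to the device; an MMIO read cannot be used for the flush
 * because the register interface is itself being reset at that point.
 */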
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}
9319 /* tp->lock is held. */
9320 static void tg3_set_bdinfo(struct tg3
*tp
, u32 bdinfo_addr
,
9321 dma_addr_t mapping
, u32 maxlen_flags
,
9325 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
9326 ((u64
) mapping
>> 32));
9328 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
),
9329 ((u64
) mapping
& 0xffffffff));
9331 (bdinfo_addr
+ TG3_BDINFO_MAXLEN_FLAGS
),
9334 if (!tg3_flag(tp
, 5705_PLUS
))
9336 (bdinfo_addr
+ TG3_BDINFO_NIC_ADDR
),
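
/*
 * Illustrative sketch (compiled out): how a 64-bit DMA address is split
 * into the two 32-bit halves written above. The helper name is made up
 * for the example; only the shift/mask arithmetic mirrors the driver.
 */
#if 0
static void example_split_dma_addr(dma_addr_t mapping, u32 *hi, u32 *lo)
{
	*hi = (u32) ((u64) mapping >> 32);		/* upper 32 bits */
	*lo = (u32) ((u64) mapping & 0xffffffff);	/* lower 32 bits */
}
#endif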
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
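
/*
 * Illustrative sketch (compiled out): the per-vector host coalescing
 * registers sit 0x18 bytes apart, so vector i's register is found at
 * the VEC1 base plus i * 0x18, as used in the loops above.
 */
#if 0
static u32 example_vec_coal_reg(u32 base_vec1, int i)
{
	return base_vec1 + i * 0x18;	/* stride between MSI-X vectors */
}
#endif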
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
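
/*
 * Illustrative sketch (compiled out): the standard-ring threshold chosen
 * above is the smaller of what the NIC-side BD cache can absorb (half
 * its depth, capped by rx_std_max_post) and one eighth of the host ring.
 */
#if 0
static u32 example_std_thresh(u32 bdcache_maxcnt, u32 rx_std_max_post,
			      u32 rx_pending)
{
	u32 nic_rep_thresh = min(bdcache_maxcnt / 2, rx_std_max_post);
	u32 host_rep_thresh = max_t(u32, rx_pending / 8, 1);

	return min(nic_rep_thresh, host_rep_thresh);
}
#endif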
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
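
/*
 * Illustrative sketch (compiled out): how the CRC above selects one of
 * the 128 multicast hash-filter bits. Bits 6:5 of the inverted CRC pick
 * one of the four MAC_HASH_REG_* registers, bits 4:0 pick the bit
 * within it. The helper name is invented for the example.
 */
#if 0
static void example_mcast_hash_bit(const u8 *addr, u32 *regidx, u32 *bitpos)
{
	u32 bit = ~calc_crc((unsigned char *)addr, ETH_ALEN) & 0x7f;

	*regidx = (bit & 0x60) >> 5;	/* which 32-bit hash register */
	*bitpos = bit & 0x1f;		/* which bit inside it */
}
#endif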
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
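
/*
 * Illustrative sketch (compiled out): eight 4-bit indirection-table
 * entries are packed, first entry in the most significant nibble, into
 * each 32-bit register, which is why the loop above shifts val left by
 * 4 before OR-ing in the next entry.
 */
#if 0
static u32 example_pack_indir(const u8 *tbl)	/* tbl[0..7], each < 16 */
{
	u32 val = 0;
	int k;

	for (k = 0; k < 8; k++)
		val = (val << 4) | (tbl[k] & 0xf);

	return val;
}
#endif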
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}
	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
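	/* With a prescaler of N the timer divides by N + 1, so the 65
	 * programmed above yields 66 MHz / 66 = 1 MHz, i.e. a 1 us tick
	 * (a reading of the value implied by the comment above).
	 */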
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);
) == ASIC_REV_5717
)
10179 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
10181 if (tg3_asic_rev(tp
) == ASIC_REV_5784
||
10182 tg3_asic_rev(tp
) == ASIC_REV_5785
||
10183 tg3_asic_rev(tp
) == ASIC_REV_57780
)
10184 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
10185 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
10186 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
10188 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
10189 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
10190 if (tg3_flag(tp
, TSO_CAPABLE
) &&
10191 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
10192 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
10193 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
10194 !tg3_flag(tp
, IS_5788
)) {
10195 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
10199 if (tg3_flag(tp
, PCI_EXPRESS
))
10200 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
10202 if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
10204 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
10205 rdmac_mode
|= RDMAC_MODE_JMB_2K_MMRR
;
10206 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_2K
;
10210 if (tg3_flag(tp
, HW_TSO_1
) ||
10211 tg3_flag(tp
, HW_TSO_2
) ||
10212 tg3_flag(tp
, HW_TSO_3
))
10213 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
10215 if (tg3_flag(tp
, 57765_PLUS
) ||
10216 tg3_asic_rev(tp
) == ASIC_REV_5785
||
10217 tg3_asic_rev(tp
) == ASIC_REV_57780
)
10218 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
10220 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
10221 tg3_asic_rev(tp
) == ASIC_REV_5762
)
10222 rdmac_mode
|= tr32(RDMAC_MODE
) & RDMAC_MODE_H2BNC_VLAN_DET
;
10224 if (tg3_asic_rev(tp
) == ASIC_REV_5761
||
10225 tg3_asic_rev(tp
) == ASIC_REV_5784
||
10226 tg3_asic_rev(tp
) == ASIC_REV_5785
||
10227 tg3_asic_rev(tp
) == ASIC_REV_57780
||
10228 tg3_flag(tp
, 57765_PLUS
)) {
10231 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
10232 tgtreg
= TG3_RDMA_RSRVCTRL_REG2
;
10234 tgtreg
= TG3_RDMA_RSRVCTRL_REG
;
10236 val
= tr32(tgtreg
);
10237 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
10238 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
10239 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
10240 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
10241 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
10242 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
10243 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
10244 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
10246 tw32(tgtreg
, val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
10249 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
10250 tg3_asic_rev(tp
) == ASIC_REV_5720
||
10251 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
10254 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
10255 tgtreg
= TG3_LSO_RD_DMA_CRPTEN_CTRL2
;
10257 tgtreg
= TG3_LSO_RD_DMA_CRPTEN_CTRL
;
10259 val
= tr32(tgtreg
);
10261 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
10262 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);
	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}
	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
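	/* The ten MAC_RSS_HASH_KEY registers above supply the 40-byte key
	 * for the RSS hash engine (a Toeplitz-style hash over the IP/TCP
	 * fields enabled via the RX_MODE_RSS_*_HASH_EN bits below).
	 */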
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);
	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
		off += len;

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
}


static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
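
/*
 * Illustrative sketch (compiled out): TG3_STAT_ADD32 widens a 32-bit
 * hardware counter into a 64-bit software one. If adding the reading
 * wraps the low word (the unsigned sum ends up smaller than the
 * addend), carry one into the high word. The struct here is a made-up
 * stand-in for the driver's own 64-bit stat type.
 */
#if 0
struct example_stat64 {
	u32 high, low;
};

static void example_stat_add32(struct example_stat64 *st, u32 reading)
{
	st->low += reading;
	if (st->low < reading)		/* unsigned wrap => carry */
		st->high += 1;
}
#endif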
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
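
/*
 * Illustrative sketch (compiled out): with tagged status the timer runs
 * at 1 Hz (timer_offset == HZ, multiplier == 1); otherwise at 10 Hz,
 * where timer_multiplier == 10 keeps the "once per second" block in
 * tg3_timer firing once per second.
 */
#if 0
static unsigned long example_ticks_per_second(unsigned long timer_offset)
{
	return HZ / timer_offset;	/* == tp->timer_multiplier */
}
#endif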
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
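/* Note: MSI-X setup below degrades gracefully.  If the PCI core grants
 * fewer vectors than requested, the rx/tx queue counts are shrunk to
 * match before the vectors are committed to the per-NAPI contexts.
 */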
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
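/* Bring-up path shared by tg3_open() and tg3_set_channels(): set up
 * interrupts and DMA memory, program the hardware, then start the timer
 * and NAPI machinery.  Errors unwind in reverse order via the out_*
 * labels.
 */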
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	if (pci_device_is_present(tp->pdev)) {
		tg3_power_down_prepare(tp);

		tg3_carrier_off(tp);
	}
	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
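/* On 5700/5701 copper devices the CRC error count lives in a PHY test
 * register rather than the MAC statistics block, so it is accumulated
 * in software here.
 */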
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
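/* Hardware statistics are cleared by a chip reset, so each ethtool
 * counter is reported as the pre-reset snapshot (old_estats) plus the
 * current hardware value.
 */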
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
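/* NVRAM writes must be 4-byte aligned; unaligned head and tail bytes
 * are handled below with a read-modify-write into a bounce buffer.
 */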
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, false);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[tp->phy_addr];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}
static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
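/* Locate the VPD data block, either via an extended-VPD NVRAM directory
 * entry or the fixed legacy offset, and read it into a kmalloc'd buffer
 * that the caller must free.
 */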
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c
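/* NVRAM self-test: how much NVRAM to checksum depends on which image
 * format (legacy, selfboot format 1 rev 0-6, or hardware selfboot) the
 * magic number announces.
 */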
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				j += 2;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				j += 3;
			} else {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			}

			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
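/* Each register below is exercised twice: all-zeros and then all-ones
 * are written through write_mask, while read_mask identifies the
 * read-only bits that must survive both writes unchanged.
 */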
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}
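/* The testable internal memory ranges differ per ASIC generation, so a
 * per-family table of { offset, len } pairs is selected at run time.
 */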
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
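/* The canned header above is the template for the TSO loopback frame:
 * an ethertype, an IPv4 header, and a TCP header carrying a timestamp
 * option, with placeholder fields patched in by tg3_run_loopback().
 */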
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
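/* Run standard, TSO and jumbo loopback variants in MAC, PHY and
 * (optionally) external loopback modes, OR-ing per-variant failure
 * bits into the corresponding data[] slot.
 */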
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
					TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);
}
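
/* The self test above is driven from userspace through the ethtool core
 * (ETHTOOL_TEST).  Illustrative only, not part of the driver; "eth0" is
 * a placeholder interface name:
 *
 *	ethtool -t eth0 offline	  # full suite, including the loopback tests
 *	ethtool -t eth0 online	  # only the tests that are safe on a live link
 */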
static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
		tg3_flag_set(tp, TX_TSTAMP_EN);
	else
		tg3_flag_clear(tp, TX_TSTAMP_EN);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
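
/* Sketch of the userspace side of SIOCSHWTSTAMP (illustrative only, not
 * driver code; "eth0" and the open socket fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The driver copies the (possibly adjusted) config back to userspace, so
 * callers should re-read cfg after the ioctl returns.
 */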
static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	stmpconf.flags = 0;
	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

	switch (tp->rxptpctl) {
	case 0:
		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
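
/* SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG above are the legacy MII ioctls used
 * by tools such as mii-tool; phy_id and reg_num are masked to 5 bits because
 * clause-22 MDIO addresses both PHYs and registers in the range 0-31.
 */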
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
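
/* These coalescing knobs map onto the standard ethtool -C parameters,
 * e.g. (illustrative only; "eth0" is a placeholder):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * which arrives here as ec->rx_coalesce_usecs and
 * ec->rx_max_coalesced_frames.
 */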
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}
static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
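
/* tg3_change_mtu() is reached through the standard MTU path, e.g.
 * "ip link set dev eth0 mtu 9000" (illustrative only; "eth0" is a
 * placeholder).  MTUs above ETH_DATA_LEN switch the chip onto the jumbo
 * producer ring via tg3_set_mtu() above, which is why a full halt and
 * restart cycle is required on a running interface.
 */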
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
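
/* Worked example of the sizing loop above (values illustrative): on a
 * 0x200-byte part, the reads at 0x10, 0x20, ... 0x100 return ordinary
 * data, while the read at 0x200 aliases offset 0 and returns the magic
 * signature, so the loop exits with cursize == 0x200.
 */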
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
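
/* Worked example of the byteswap above (illustrative): if the lower 16
 * bits of the read come back as 0x0002, then swab16(0x0002) == 0x0200 ==
 * 512, and tp->nvram_size becomes 512 * 1024 bytes.
 */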
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;
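
		/* The packing above mirrors tg3's internal PHY ID layout:
		 * the OUI portion from the SRAM ID1 word lands in the top
		 * bits and the model/revision bits from ID2 are folded in,
		 * matching what tg3_phy_probe() later builds from the
		 * MII_PHYSID1/MII_PHYSID2 registers.
		 */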
		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
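
/* Worked example of the merge above (values illustrative): with
 * thalf_otp = 0xaaaa1234 and bhalf_otp = 0x5678bbbb, the result is
 * ((0xaaaa1234 & 0x0000ffff) << 16) | (0x5678bbbb >> 16) = 0x12345678,
 * i.e. the low half-word of the first read supplies the top half of the
 * gphy config and the high half-word of the second read the bottom half.
 */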
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
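
/* APE probing: check the shared-memory segment signature and the
 * firmware-ready status bit before trusting any APE feature flags.
 * NCSI-capable firmware is recorded here so tg3_read_dash_ver() can
 * label the version string with the right firmware type.
 */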
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
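
/* The readers above append to tp->fw_ver in turn, so the final string
 * is a short summary such as "v3.02, NCSI v1.2.3.4" (illustrative only;
 * the exact contents depend on which NVRAM/APE sources were found).
 */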
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
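
/* Host bridges known to reorder posted writes to the mailbox registers.
 * This table is consumed by pci_dev_present() in tg3_get_invariants()
 * to decide whether every mailbox write must be flushed with a read.
 */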
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
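
/* Decode the chip revision.  Newer parts report a placeholder value in
 * MISC_HOST_CTRL and publish the real ASIC revision through a per-family
 * product-id config register, selected from the PCI device ID below.
 */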
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
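
/* tg3_get_invariants() is the probe-time fixup pass: it translates
 * static chip facts (bus type, TSO engine, DMA and register-access
 * quirks) into tg3_flag() bits before the first MMIO access that
 * depends on them.
 */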
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time so it never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);
	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;

		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;
	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;
	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
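
/* MAC address discovery tries, in order: firmware properties (sparc),
 * the SSB host (if this is an SSB GigE core), the SRAM mailbox, NVRAM,
 * and finally the MAC address registers themselves.
 */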
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == ETH_ALEN) {
		memcpy(dev->dev_addr, addr, ETH_ALEN);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
	return 0;
}
#endif
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
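
/* DMA boundary tuning: fold the host cacheline size and bus type into
 * DMA_RWCTRL boundary bits so that bursts do not cross cachelines on
 * hosts where such a crossing forces a costly disconnect.
 */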
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
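
/* Hosts in this table are known to expose the 5700/5701 write-DMA bug
 * even when the loopback test below passes, so the conservative 16-byte
 * write boundary is forced for them regardless of the test result.
 */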
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
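
/* Buffer manager watermark defaults come in three generations (57765+,
 * 5705+, legacy) plus a 5906 override; the values chosen here are
 * written to the chip later during hardware bringup.
 */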
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
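
/* tg3_init_one() is the PCI probe entry point: enable and map the
 * device, gather invariants, pick DMA masks and netdev features, and
 * obtain the MAC address before the chip is reset and registered.
 */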
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
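
	/* pci_set_dma_mask() constrains addresses for streaming (packet
	 * buffer) DMA, while pci_set_consistent_dma_mask() constrains the
	 * coherent allocations backing the rings and status block; if the
	 * wide mask cannot be set, the code below falls back to 32-bit.
	 */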
	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}
	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut down
	 * DMA. The DMA self test will enable WDMAC, and we would see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
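
	/* Give each NAPI context its own interrupt, rx-return-ring and
	 * tx-producer mailbox register offset, computed from the base
	 * mailbox addresses below.
	 */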
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set up
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);
	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}
	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}
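
/* Undo everything tg3_init_one() set up, in roughly the reverse order of
 * acquisition: firmware, PHY library state, netdev registration, register
 * mappings, and finally the PCI resources themselves.
 */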
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */
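
/* SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops that routes the system
 * sleep transitions (suspend/resume, freeze/thaw, poweroff/restore) to
 * tg3_suspend/tg3_resume; when CONFIG_PM_SLEEP is not set, the two
 * callbacks compile away and tg3_pm_ops is effectively empty.
 */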
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}
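
/* PCI error recovery (AER) proceeds in up to three stages, mirrored by the
 * callbacks below: error_detected() quiesces the device, slot_reset()
 * reinitializes it after the link/bus reset, and resume() restarts traffic.
 */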
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};
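
/* module_pci_driver() expands to the module_init()/module_exit() boilerplate
 * that registers and unregisters tg3_driver with the PCI core.
 */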
module_pci_driver(tg3_driver);