/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
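
/* Illustrative note (added commentary, not from the original source): the
 * tg3_flag() wrappers let callers name flags without the TG3_FLAG_ prefix
 * while still getting atomic bitops on the tg3_flags bitmap.  Assuming a
 * flag named TG3_FLAG_ENABLE_APE exists in enum TG3_FLAGS,
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		...;
 *
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. a
 * plain test_bit() call.
 */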
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			123
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"March 21, 2012"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)', as shown in the
 * worked note after NEXT_TX() below.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
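
/* Illustrative worked note (added commentary, not from the original
 * source): because TG3_TX_RING_SIZE is a power of two known at compile
 * time, the NEXT_TX() mask is equivalent to a modulo but compiles to a
 * single AND rather than a hw divide.  With a ring size of 512:
 *
 *	NEXT_TX(510) == 511
 *	NEXT_TX(511) == (512 & 511) == 0	(index wraps around)
 *
 * which matches ((N) + 1) % TG3_TX_RING_SIZE exactly.
 */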
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
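
/* Illustrative note (added commentary, not from the original source): on
 * an architecture with efficient unaligned access, TG3_RX_COPY_THRESH(tp)
 * folds to the constant 256, so the rx path's "copy small packets" check
 * compares against an immediate; otherwise it must dereference
 * tp->rx_copy_thresh, which the 5701 PCIX workaround can raise so that
 * every packet is double copied into an aligned buffer.
 */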
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
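
/* Illustrative usage note (added commentary, not from the original
 * source): a call such as tw32_wait_f(GRC_LOCAL_CTRL, val, 100) routes
 * through _tw32_flush() with usec_wait == 100, so the register write is
 * posted, read back to flush the PCI write buffer, and the driver still
 * waits 100us after the read so GPIO-driven power switching has settled
 * before execution continues.
 */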
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
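
/* Illustrative note (added commentary, not from the original source): in
 * tagged-status mode the mailbox write above carries last_tag, so the
 * chip itself re-raises the interrupt if status blocks newer than that
 * tag are outstanding.  Only in non-tagged mode must the driver poll
 * tg3_has_work() and manually kick HOSTCC_MODE to avoid a lost-interrupt
 * race, which is why the extra register write is conditional.
 */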
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;
	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
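
/* Illustrative timing note (added commentary, not from the original
 * source): the loop above polls BMCR up to 5000 times with a 10us delay,
 * i.e. it allows on the order of 50ms for the PHY to clear BMCR_RESET
 * before giving up with -EBUSY, which is intended to cover the reset
 * times of the PHYs this driver supports.
 */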
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
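
/* Illustrative worked example (added commentary, not from the original
 * source): if the last firmware event was sent ~1500us ago, time_remain
 * corresponds to roughly 1000us, so delay_cnt becomes (1000 >> 3) + 1 =
 * 126 iterations of an 8us poll, i.e. the loop waits at most the
 * remaining ~1ms rather than the full 2.5ms timeout.
 */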
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
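
/* Illustrative summary (added commentary, not from the original source),
 * following the IEEE 802.3x pause resolution for 1000BASE-X as coded
 * above:
 *
 *	both sides advertise PAUSE            -> TX and RX flow control
 *	local PAUSE+ASYM, remote ASYM only    -> RX only (we may be paused)
 *	local ASYM only, remote PAUSE+ASYM    -> TX only (we may pause peer)
 *	anything else                         -> no flow control
 */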
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))

static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}

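/*
 * Worked example (illustrative only, not part of the driver logic):
 * each PCI function owns a 4-bit nibble above TG3_APE_GPIO_MSG_SHIFT,
 * and TG3_GPIO_MSG_MASK covers the two message bits within it.  For
 * tp->pci_fn == 2 and newstat == TG3_GPIO_MSG_NEED_VAUX, the code
 * above computes shift = TG3_APE_GPIO_MSG_SHIFT + 8, clears only
 * function 2's nibble, and sets its NEED_VAUX bit, leaving the other
 * three functions' DRVR_PRES/NEED_VAUX bits untouched.
 */
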
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

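/*
 * Usage sketch (illustrative): the lock nests via tp->nvram_lock_cnt,
 * so hardware arbitration (NVRAM_SWARB) is requested only on the first
 * tg3_nvram_lock() and dropped by the matching last tg3_nvram_unlock():
 *
 *	if (!tg3_nvram_lock(tp)) {
 *		...access NVRAM registers...
 *		tg3_nvram_unlock(tp);
 *	}
 */
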
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

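/*
 * Command words are OR-compositions of NVRAM_CMD_* flags; for example,
 * tg3_nvram_read() below issues a single-word read as
 * NVRAM_CMD_RD | NVRAM_CMD_GO | NVRAM_CMD_FIRST | NVRAM_CMD_LAST |
 * NVRAM_CMD_DONE, and the write paths compose erase and write commands
 * the same way before handing them to tg3_nvram_exec_cmd().
 */
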
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

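/*
 * Worked example (assuming the AT45DB0X1B's 264-byte pages and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9): linear address 1000 sits in page 3
 * (1000 / 264) at offset 208 (1000 % 264), so tg3_nvram_phys_addr()
 * yields (3 << 9) + 208 = 1744.  tg3_nvram_logical_addr() inverts it:
 * (1744 >> 9) * 264 + (1744 & 511) = 792 + 208 = 1000.
 */
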
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}

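/*
 * Illustrative caller sketch (hypothetical helper, not part of the
 * driver): because tg3_nvram_read_be32() returns bytestream
 * (big-endian) data, callers can copy the result into a byte buffer
 * and see the bytes in NVRAM order regardless of host endianness.
 */
static int tg3_example_read_bytes(struct tg3 *tp, u32 offset, u8 *dst)
{
	__be32 v;
	int err = tg3_nvram_read_be32(tp, offset, &v);

	if (!err)
		memcpy(dst, &v, sizeof(v));	/* bytes arrive in NVRAM order */
	return err;
}
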
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

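/*
 * Illustrative usage (offset and data are assumptions for the example):
 * callers pass dword-aligned offset/len pairs, e.g.
 *
 *	u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };
 *	err = tg3_nvram_write_block(tp, 0x100, sizeof(buf), buf);
 *
 * and the helper above dispatches to the EEPROM, buffered-flash, or
 * unbuffered-flash path based on the NVRAM/NVRAM_BUFFERED/FLASH flags.
 */
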
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}

struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}

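/*
 * Minimal sketch of the blob-header parse described above (hypothetical
 * helper; the layout is the one tg3_load_5701_a0_firmware_fix() and
 * tg3_load_tso_firmware() assume): word 0 carries version info, word 1
 * the load/start address, word 2 the length, and the image proper
 * starts at word 3, hence "tp->fw->size - 12".
 */
static void tg3_example_parse_fw_hdr(const struct firmware *fw,
				     struct fw_info *info)
{
	const __be32 *fw_data = (const __be32 *)fw->data;

	info->fw_base = be32_to_cpu(fw_data[1]);	/* start address */
	info->fw_len = fw->size - 12;			/* skip 3 header words */
	info->fw_data = &fw_data[3];			/* image payload */
}
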
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}

/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

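/*
 * Worked example (illustrative): for MAC address 00:10:18:aa:bb:cc the
 * loop above programs addr_high = 0x0010 (bytes 0-1) and
 * addr_low = 0x18aabbcc (bytes 2-5), i.e. the address is split 2/4 with
 * earlier bytes landing in the more significant bit positions.
 */
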
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}

static int tg3_setup_phy(struct tg3 *, int);

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}

static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}

static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}

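/*
 * Worked example (illustrative): advertising 10/100 full duplex with
 * flowctrl == (FLOW_CTRL_TX | FLOW_CTRL_RX) yields
 * new_adv = ADVERTISE_CSMA | ADVERTISE_10FULL | ADVERTISE_100FULL |
 * ADVERTISE_PAUSE_CAP, since mii_advertise_flowctrl() encodes symmetric
 * pause as PAUSE_CAP alone (the ASYM bit cancels out).
 */
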
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}

static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}

static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) +
					      PCI_EXP_LNKCTL, newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}

struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000

4460 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
4461 struct tg3_fiber_aneginfo
*ap
)
4464 unsigned long delta
;
4468 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
4472 ap
->ability_match_cfg
= 0;
4473 ap
->ability_match_count
= 0;
4474 ap
->ability_match
= 0;
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
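/* Note: each call to tg3_fiber_aneg_smachine() advances the machine by
 * one tick.  A received config word must be seen unchanged on two
 * consecutive ticks (ability_match_count > 1) before ability_match is
 * asserted, filtering transient noise out of the autoneg code words.
 */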
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
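/* The polling loop in fiber_autoneg() above bounds software autoneg at
 * roughly 195 ms (195000 passes with a ~1 usec delay per pass) before
 * the result is treated as final.
 */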
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
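/* tg3_rd32_loop() biases the destination pointer by the register offset,
 * so each register always lands at dst[reg_off / sizeof(u32)] no matter
 * which block is dumped.  For example:
 *
 *	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
 *
 * fills regs[MAC_MODE / 4] through regs[(MAC_MODE + 0x4f0) / 4 - 1] with
 * the corresponding register contents.
 */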
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
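/* The arithmetic above relies on TG3_TX_RING_SIZE being a power of two:
 * (tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1) yields the number of
 * in-flight descriptors even after the indices wrap.  E.g. with a
 * 512-entry ring, tx_prod == 3 and tx_cons == 510 gives
 * (3 - 510) & 511 == 5 descriptors still queued.
 */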
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	kfree(ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc(skb_size, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		kfree(data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
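/* The buffer sized above follows the build_skb() convention: frame data
 * (plus the RX offset) followed by a tail-appended struct
 * skb_shared_info, each rounded with SKB_DATA_ALIGN so the shared info
 * starts on a cache-line boundary.
 */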
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
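/* Receive strategy used by tg3_rx() above: frames larger than
 * TG3_RX_COPY_THRESH() keep their DMA buffer and are wrapped with
 * build_skb() while a replacement buffer is posted to the ring; smaller
 * frames are copied into a fresh skb and the original buffer is recycled
 * in place, which is cheaper than a remap for short packets.
 */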
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
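/* Copy-count derivation used above, for reference: if the consumer index
 * has wrapped past the producer, the transfer is clamped to the entries
 * up to the end of the ring (mask + 1 - cons_idx); the next pass of the
 * outer while loop then picks up the remainder from index zero.
 */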
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
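/* Worked example for the test above: base = 0xffffff00 with len = 0x1000
 * gives base + len + 8 == 0xf08 after the u32 wraps, which is less than
 * base, so the buffer straddles a 4GB boundary and must be bounced.  The
 * base > 0xffffdcc0 pre-check cheaply rejects mappings that cannot wrap
 * for any len + 8 up to 0x2340 (9024) bytes, i.e. anything frame-sized.
 */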
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
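/* Note on the dma_limit split above: a remainder of 8 bytes or less
 * would itself trip the short-DMA bug, so the final full-sized slice is
 * halved instead.  E.g. with dma_limit = 4096 and len = 4100 the
 * descriptors become 2048 + 2052 rather than 4096 + 4.
 */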
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	if (!skb)
		return;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
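/* tg3_tso_bug() above is a software GSO fallback: skb_gso_segment()
 * splits the offending TSO frame into already-segmented skbs, and each
 * segment is re-queued through tg3_start_xmit() as an ordinary packet.
 */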
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
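/* The stop/wake handshake above is the usual lock-free producer/consumer
 * protocol: the xmit path stops the queue *before* re-reading the free
 * descriptor count, while the reclaim path (tg3_tx()) publishes its new
 * consumer index *before* testing netif_tx_queue_stopped().  The paired
 * memory barriers guarantee that at least one side observes the other's
 * update, so the queue cannot stall in the stopped state.  Roughly
 * (an illustrative sketch):
 *
 *	xmit:				reclaim:
 *	  netif_tx_stop_queue(txq);	  tnapi->tx_cons = new_cons;
 *	  smp_mb();			  smp_mb();
 *	  if (avail > thresh)		  if (stopped && avail > thresh)
 *		netif_tx_wake_queue(txq);	netif_tx_wake_queue(txq);
 */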
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					       sizeof(struct tg3_tx_ring_info) *
					       TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			if (tg3_flag(tp, ENABLE_RSS)) {
				tnapi->rx_rcb_prod_idx = NULL;
				break;
			}
			/* Fall through */
		case 1:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	if (err)
		return err;

	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
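/* Each TG3_BDINFO block in NIC SRAM is a small control structure that
 * describes one buffer descriptor ring: the 64-bit host DMA address of the
 * ring (high/low words), a maxlen/flags word encoding the ring size and
 * attributes, and, on pre-5705 devices, the NIC SRAM address where the
 * descriptors themselves live.
 */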
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
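/* The per-vector coalescing registers sit at a fixed 0x18-byte stride
 * starting at the *_VEC1 offsets, hence the base + i * 0x18 addressing in
 * the loops above: vector 1's registers come first, and vectors beyond the
 * active irq_cnt are simply programmed to zero.
 */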
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
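/* calc_crc() is the standard bit-serial Ethernet CRC-32 (reflected
 * polynomial 0xedb88320), returned inverted.  The multicast filter code
 * below consumes only the low-order bits of the result to select one of
 * the 128 hash-bucket bits spread across the four MAC_HASH_REG registers.
 */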
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
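/* Worked example of the hash mapping above (illustrative): the low 7 bits
 * of the inverted CRC select one of 128 filter bits; bits 6:5 pick which
 * of the four 32-bit MAC_HASH_REG registers holds it and bits 4:0 pick the
 * bit within that register.  A value of 0x6d therefore lands in register
 * (0x6d & 0x60) >> 5 = 3 at bit position 0x6d & 0x1f = 13.
 */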
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] =
			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->irq_cnt <= 2) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
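/* The RSS indirection table is packed eight entries per 32-bit register,
 * 4 bits each with the first entry in the most significant nibble; the
 * inner loop above shifts val left by 4 until i reaches the next multiple
 * of 8, then the register pointer advances by 4 bytes.
 */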
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
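	/* The MBUF watermarks above are sized for standard vs. jumbo MTUs;
	 * the DMA descriptor pool watermarks below are programmed the same
	 * way regardless of MTU.
	 */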
8826 tw32(BUFMGR_DMA_LOW_WATER
,
8827 tp
->bufmgr_config
.dma_low_water
);
8828 tw32(BUFMGR_DMA_HIGH_WATER
,
8829 tp
->bufmgr_config
.dma_high_water
);
8831 val
= BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
;
8832 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
8833 val
|= BUFMGR_MODE_NO_TX_UNDERRUN
;
8834 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
8835 tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
8836 tp
->pci_chip_rev_id
== CHIPREV_ID_5720_A0
)
8837 val
|= BUFMGR_MODE_MBLOW_ATTN_ENAB
;
8838 tw32(BUFMGR_MODE
, val
);
8839 for (i
= 0; i
< 2000; i
++) {
8840 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
8845 netdev_err(tp
->dev
, "%s cannot enable BUFMGR\n", __func__
);
8849 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5906_A1
)
8850 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
8852 tg3_setup_rxbd_thresholds(tp
);
8854 /* Initialize TG3_BDINFO's at:
8855 * RCVDBDI_STD_BD: standard eth size rx ring
8856 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8857 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8860 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8861 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8862 * ring attribute flags
8863 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8865 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8866 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8868 * The size of each ring is fixed in the firmware, but the location is
8871 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8872 ((u64
) tpr
->rx_std_mapping
>> 32));
8873 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8874 ((u64
) tpr
->rx_std_mapping
& 0xffffffff));
8875 if (!tg3_flag(tp
, 5717_PLUS
))
8876 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
8877 NIC_SRAM_RX_BUFFER_DESC
);
8879 /* Disable the mini ring */
8880 if (!tg3_flag(tp
, 5705_PLUS
))
8881 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8882 BDINFO_FLAGS_DISABLED
);
8884 /* Program the jumbo buffer descriptor ring control
8885 * blocks on those devices that have them.
8887 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
8888 (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))) {
8890 if (tg3_flag(tp
, JUMBO_RING_ENABLE
)) {
8891 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8892 ((u64
) tpr
->rx_jmb_mapping
>> 32));
8893 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8894 ((u64
) tpr
->rx_jmb_mapping
& 0xffffffff));
8895 val
= TG3_RX_JMB_RING_SIZE(tp
) <<
8896 BDINFO_FLAGS_MAXLEN_SHIFT
;
8897 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8898 val
| BDINFO_FLAGS_USE_EXT_RECV
);
8899 if (!tg3_flag(tp
, USE_JUMBO_BDFLAG
) ||
8900 tg3_flag(tp
, 57765_CLASS
))
8901 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
8902 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
8904 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8905 BDINFO_FLAGS_DISABLED
);
8908 if (tg3_flag(tp
, 57765_PLUS
)) {
8909 val
= TG3_RX_STD_RING_SIZE(tp
);
8910 val
<<= BDINFO_FLAGS_MAXLEN_SHIFT
;
8911 val
|= (TG3_RX_STD_DMA_SZ
<< 2);
8913 val
= TG3_RX_STD_DMA_SZ
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
8915 val
= TG3_RX_STD_MAX_SIZE_5700
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
8917 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
, val
);
8919 tpr
->rx_std_prod_idx
= tp
->rx_pending
;
8920 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
, tpr
->rx_std_prod_idx
);
8922 tpr
->rx_jmb_prod_idx
=
8923 tg3_flag(tp
, JUMBO_RING_ENABLE
) ? tp
->rx_jumbo_pending
: 0;
8924 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
, tpr
->rx_jmb_prod_idx
);
8926 tg3_rings_reset(tp
);
8928 /* Initialize MAC address and backoff seed. */
8929 __tg3_set_mac_addr(tp
, 0);
8931 /* MTU + ethernet header + FCS + optional VLAN tag */
8932 tw32(MAC_RX_MTU_SIZE
,
8933 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
8935 /* The slot time is changed by tg3_setup_phy if we
8936 * run at gigabit with half duplex.
8938 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
8939 (6 << TX_LENGTHS_IPG_SHIFT
) |
8940 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
);
8942 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
8943 val
|= tr32(MAC_TX_LENGTHS
) &
8944 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
8945 TX_LENGTHS_CNT_DWN_VAL_MSK
);
8947 tw32(MAC_TX_LENGTHS
, val
);
8949 /* Receive rules. */
8950 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
8951 tw32(RCVLPC_CONFIG
, 0x0181);
	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));
	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32))
			tg3_write_mem(tp, i, 0);
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE,
	       tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing needed */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}
	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}
	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);
	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}
	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);
	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}
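	/* The switch above relies on case fall-through: entering at 'limit'
	 * clears every remaining rule register from 15 downward.  Rules 0
	 * and 1 were just programmed, and rules 2 and 3 are deliberately
	 * left alone, hence the commented-out cases.
	 */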
	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
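/* A worked example of the carry detection in TG3_STAT_ADD32: the
 * { high, low } pair is a 64-bit software accumulator fed from a 32-bit
 * hardware counter.  Unsigned addition wraps on overflow, so if low is
 * 0xfffffff0 and the new sample __val is 0x20, low becomes 0x10, which
 * is smaller than the value just added; that comparison is the carry
 * into high.
 */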
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
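/* Note on the heuristic above: a vector is only deemed to have lost an
 * interrupt after its rx/tx consumer indices have sat still across two
 * consecutive timer ticks while work was pending; chk_msi_cnt provides
 * the one-tick grace period before the handler is invoked by hand.
 */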
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void __devinit tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
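/* A quick check of the arithmetic above, assuming HZ = 1000: in the
 * HZ / 10 case timer_offset is 100 jiffies, so timer_multiplier is 10
 * and the once-per-second block in tg3_timer runs every tenth tick,
 * while asf_multiplier stretches the same tick rate out to the
 * multi-second heartbeat cadence described in tg3_timer.
 */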
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
*tp
)
9800 struct tg3_napi
*tnapi
= &tp
->napi
[0];
9801 struct net_device
*dev
= tp
->dev
;
9802 int err
, i
, intr_ok
= 0;
9805 if (!netif_running(dev
))
9808 tg3_disable_ints(tp
);
9810 free_irq(tnapi
->irq_vec
, tnapi
);
9813 * Turn off MSI one shot mode. Otherwise this test has no
9814 * observable way to know whether the interrupt was delivered.
9816 if (tg3_flag(tp
, 57765_PLUS
)) {
9817 val
= tr32(MSGINT_MODE
) | MSGINT_MODE_ONE_SHOT_DISABLE
;
9818 tw32(MSGINT_MODE
, val
);
9821 err
= request_irq(tnapi
->irq_vec
, tg3_test_isr
,
9822 IRQF_SHARED
, dev
->name
, tnapi
);
9826 tnapi
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
9827 tg3_enable_ints(tp
);
9829 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
9832 for (i
= 0; i
< 5; i
++) {
9833 u32 int_mbox
, misc_host_ctrl
;
9835 int_mbox
= tr32_mailbox(tnapi
->int_mbox
);
9836 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
9838 if ((int_mbox
!= 0) ||
9839 (misc_host_ctrl
& MISC_HOST_CTRL_MASK_PCI_INT
)) {
9844 if (tg3_flag(tp
, 57765_PLUS
) &&
9845 tnapi
->hw_status
->status_tag
!= tnapi
->last_tag
)
9846 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
9851 tg3_disable_ints(tp
);
9853 free_irq(tnapi
->irq_vec
, tnapi
);
9855 err
= tg3_request_irq(tp
, 0);
9861 /* Reenable MSI one shot mode. */
9862 if (tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, 1SHOT_MSI
)) {
9863 val
= tr32(MSGINT_MODE
) & ~MSGINT_MODE_ONE_SHOT_DISABLE
;
9864 tw32(MSGINT_MODE
, val
);
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
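/* Sketch of the firmware header layout implied above: three 32-bit
 * big-endian words precede the payload (hence the fw->size - 12 check).
 * Per the comment in tg3_request_firmware, fw_data[0] carries the
 * version, fw_data[1] the start address, and fw_data[2] the full image
 * length with BSS included.
 */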
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[tp->irq_max];

	tp->irq_cnt = num_online_cpus();
	if (tp->irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
	}

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
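/* Worked example of the vector accounting in tg3_enable_msix: on a
 * four-CPU system irq_cnt starts at 4 and is bumped to 5 (capped at
 * irq_max) because vector 0 only services link and error events; the
 * remaining vectors carry one rx ring each, which is why the real rx
 * queue count is irq_cnt - 1.
 */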
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);

	return err;
}
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	tg3_timer_stop(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
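/* get_stat64 pairs with TG3_STAT_ADD32 earlier in this file: the two
 * 32-bit halves maintained there are stitched back into a single u64,
 * e.g. { high = 0x1, low = 0x10 } reads back as 0x100000010.
 */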
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}

static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->irq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_IRQ_MAX_VECS_RSS)
				info->data = TG3_IRQ_MAX_VECS_RSS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}

static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}

static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE				0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE		0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE		0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE		0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE		0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE		0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE		0x50
#define NVRAM_SELFBOOT_HW_SIZE			0x20
#define NVRAM_SELFBOOT_DATA_SIZE		0x1c
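/* The NVRAM_SELFBOOT_FORMAT1_*_SIZE values give the number of checksummed
 * bytes for each revision of the format 1 selfboot image; tg3_test_nvram()
 * below selects one of them from the revision bits in the magic word.
 */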
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			err = 0;
			if (csum8)
				err = -EIO;
		}
	}

out:
	kfree(buf);
	return err;
}
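/* Summary: tg3_test_nvram() validates three possible NVRAM layouts -- the
 * legacy format (CRCs over the bootstrap and manufacturing blocks), the
 * firmware selfboot format (8-bit additive checksum), and the hardware
 * selfboot format (per-byte parity bits) -- and then cross-checks the VPD
 * read-only section checksum when one is present.
 */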
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};
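	/* For each entry above, read_mask selects read-only bits whose value
	 * must survive writes, and write_mask selects read/write bits that
	 * must accept both all-zeros and all-ones.  A zero flags field means
	 * the register is tested on every chip.
	 */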
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
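/* tg3_tso_header is a canned packet template: a 2-byte EtherType (IPv4),
 * a 20-byte IP header, and a 32-byte TCP header (20 fixed bytes plus
 * TG3_TSO_TCP_OPT_LEN bytes of options).  The loopback test below copies it
 * in after the MAC addresses and then patches the IP total-length field.
 */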
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
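/* Each u64 in the caller-supplied array collects the failure bits above for
 * one loopback mode: data[0] is MAC loopback, data[1] is internal PHY
 * loopback, and data[2] is external (cable) loopback.
 */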
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
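/* The 264- and 528-byte page sizes correspond to Atmel DataFlash parts whose
 * pages are not a power of two; those are the geometries the NVRAM
 * address-translation logic handles, which is presumably why callers set
 * NO_NVRAM_ADDR_TRANS for every other page size.
 */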
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
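/* 5700 and 5701 have no NVRAM interface; they fall into the else branch
 * above and are sized through the legacy seeprom path instead.
 */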
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
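/* Entries with a zero PHY ID are most likely fiber/serdes boards with no
 * copper PHY to match; callers of tg3_lookup_by_subsys() treat a zero
 * phy_id as "probe the PHY instead".
 */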
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
13226 static void __devinit
tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
13230 tp
->phy_id
= TG3_PHY_ID_INVALID
;
13231 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
13233 /* Assume an onboard device and WOL capable by default. */
13234 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
13235 tg3_flag_set(tp
, WOL_CAP
);
13237 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
13238 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
13239 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
13240 tg3_flag_set(tp
, IS_NIC
);
13242 val
= tr32(VCPU_CFGSHDW
);
13243 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
13244 tg3_flag_set(tp
, ASPM_WORKAROUND
);
13245 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
13246 (val
& VCPU_CFGSHDW_WOL_MAGPKT
)) {
13247 tg3_flag_set(tp
, WOL_ENABLE
);
13248 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
13253 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
13254 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
13255 u32 nic_cfg
, led_cfg
;
13256 u32 nic_phy_id
, ver
, cfg2
= 0, cfg4
= 0, eeprom_phy_id
;
13257 int eeprom_phy_serdes
= 0;
13259 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
13260 tp
->nic_sram_data_cfg
= nic_cfg
;
13262 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
13263 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
13264 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
13265 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
13266 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
&&
13267 (ver
> 0) && (ver
< 0x100))
13268 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
13270 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
13271 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_4
, &cfg4
);
13273 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
13274 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
13275 eeprom_phy_serdes
= 1;
13277 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
13278 if (nic_phy_id
!= 0) {
13279 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
13280 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
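
/* The poll in tg3_issue_otp_command() budgets 100 iterations with a 10 us
 * delay between status reads, which is where the "up to 1 ms" figure in
 * its comment comes from.
 */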
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
}
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
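		/* The DSP init below is issued twice on purpose; inferred
		 * from the control flow (the source carries no comment
		 * here), the first write sequence after reset does not
		 * always take on the BCM5401, so a successful first pass
		 * is followed by a second one.
		 */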
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

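	/* "1028" below is PCI_VENDOR_ID_DELL (0x1028) spelled out as the
	 * four ASCII characters of the VPD MFR_ID field; only boards that
	 * identify as Dell get the "<version> bc " prefix in fw_ver.
	 */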
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
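
/* tg3_read_bc_ver() thus handles both bootcode layouts: newer images carry
 * a 16-byte ASCII version string located through the word at offset + 8,
 * while legacy images only yield a major/minor pair from the NVRAM pointer
 * table entry.
 */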
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
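
/* Builds 1-26 map onto an 'a'-'z' suffix appended to the version string;
 * anything reporting minor > 99 or build > 26 is treated as invalid and no
 * version suffix is emitted at all.
 */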
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
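
/* Note the strict ordering of the generation flags set above: 57765_CLASS
 * and 5717_PLUS imply 57765_PLUS, which implies 5755_PLUS, which implies
 * 5750_PLUS, which implies 5705_PLUS. Later code can therefore gate a
 * feature on the broadest applicable generation with a single flag test.
 */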
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it is never
	 * used. This suggests that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  0xa },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
		; /* Do nothing.  HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tp->fw_needed) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;
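
	/* The ladder above picks the newest TSO engine the chip supports:
	 * HW_TSO_3 (57765 and later), HW_TSO_2 (5755+ and 5906), HW_TSO_1
	 * (5750+, with TSO_BUG kept on pre-C2 5750 revisions), and finally
	 * firmware-based TSO via FIRMWARE_TG3TSO/FIRMWARE_TG3TSO5 for the
	 * oldest parts. 5719 A0 gets no TSO at all due to a hardware bug.
	 */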

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
			tg3_rss_init_dflt_indir_tbl(tp);
		}
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
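
	/* Summary of the accessor selection above: plain MMIO through
	 * tg3_read32()/tg3_write32() is the fast path; the PCI-X target and
	 * ICH workarounds fall back to indirect accesses through PCI config
	 * space, hosts that reorder writes (and the TXD mailbox bug) get
	 * flushing variants that read back after writing, and the 5906 uses
	 * its own mailbox accessors.
	 */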

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
			tp->pci_fn = tp->pci_fn ? 1 : 0;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
		}
	}
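
	/* PCI_FUNC() is only a default here: on multi-port devices the
	 * function number the host sees need not match the port the MAC
	 * hardware thinks it is, so where possible the port index is
	 * re-derived from the PCI-X status register or the CPMU status
	 * word above.
	 */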

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* 0x484b is ASCII "HK"; apparently
					 * the bootcode's valid-address marker
					 */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
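
/* The address selection above is strictly ordered: OF "local-mac-address"
 * property (sparc), then the SRAM MAC mailbox, then NVRAM, then the
 * MAC_ADDR_0 registers, with the sparc IDPROM as the final fallback before
 * giving up with -EINVAL.
 */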
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif
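
	/* Per-architecture choice of burst boundary goal: PPC64/IA64/PARISC
	 * hosts prefer bursts spanning multiple cache lines, sparc64 and
	 * alpha hosts (whose PCI controllers tend to disconnect on
	 * boundary-crossing bursts) keep each burst within one cache line,
	 * and all other architectures leave the boundary bits at the chip
	 * default.
	 */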
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size,
				     int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
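
/*
 * Illustrative sketch, not part of the original driver:
 * tg3_do_test_dma() stages the descriptor into NIC SRAM through the
 * PCI memory window, one 32-bit word at a time: write the target SRAM
 * offset to TG3PCI_MEM_WIN_BASE_ADDR, then the word to
 * TG3PCI_MEM_WIN_DATA.  A hypothetical standalone helper for that
 * idiom (guarded out; tg3_write_mem() plays a similar role elsewhere
 * in the driver, with proper locking):
 */
#if 0
static void tg3_sram_write_word(struct tg3 *tp, u32 off, u32 val)
{
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Restore the window so later accesses start from a known base. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
#endif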
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
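
/*
 * Illustrative sketch, not part of the original driver:
 * pci_dev_present() scans the system for any device matching an id
 * table, which is how tg3_test_dma() above force-enables the 16-byte
 * write boundary on known problem bridges even when the DMA test
 * passes.  A hypothetical quirk check of the same shape (guarded out):
 */
#if 0
static bool tg3_has_dma_quirk_bridge(void)
{
	static const struct pci_device_id quirk_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
			     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
		{ },
	};

	return pci_dev_present(quirk_ids) != 0;
}
#endif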
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
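
/*
 * Illustrative sketch, not part of the original driver: the defaults
 * set in tg3_init_coal() are what userspace reads back through the
 * ETHTOOL_GCOALESCE ioctl (e.g. "ethtool -c ethX").  A hypothetical
 * user-space reader, guarded out because it is not kernel code:
 */
#if 0
	/* user space, not kernel code */
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;
	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx_coalesce_usecs=%u\n", ec.rx_coalesce_usecs);
#endif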
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
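
	/*
	 * Illustrative note, not part of the original driver:
	 * DMA_BIT_MASK(n) is a mask with the low n bits set, so the three
	 * cases above select 0xffffffff (32-bit), 0xffffffffff (40-bit,
	 * EPB-limited parts) or a full 64-bit mask.  The guarded-out
	 * check below spells out the 40-bit value:
	 */
#if 0
	BUILD_BUG_ON(DMA_BIT_MASK(40) != 0x000000ffffffffffULL);
#endif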
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown DMA.
	 * The self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
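
/*
 * Illustrative note, not part of the original driver: on kernels that
 * provide the module_pci_driver() helper, the tg3_init()/tg3_cleanup()
 * boilerplate above collapses to a single line.  Shown guarded out as
 * an equivalent alternative, not a change to this driver:
 */
#if 0
module_pci_driver(tg3_driver);
#endif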