/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
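/*
 * Annotation added for clarity (not in the original source): the
 * wrappers above paste the short flag name onto the TG3_FLAG_ prefix,
 * so a call such as
 *
 *	tg3_flag(tp, ENABLE_APE)
 *
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an
 * atomic test_bit() on the flag bitmap carried in struct tg3.
 */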
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		122
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"December 7, 2011"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
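/*
 * Worked example (annotation, not in the original source): because
 * TG3_TX_RING_SIZE is a power of two, NEXT_TX() wraps the producer
 * index with a single AND: NEXT_TX(511) == (512 & 511) == 0, the same
 * result as (511 + 1) % 512 but without a hardware divide.
 */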
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
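/*
 * Annotation (not in the original source): on architectures where
 * unaligned loads are cheap (NET_IP_ALIGN == 0 or
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, e.g. x86), the copy
 * threshold above collapses to the compile-time constant 256, so the
 * rx fast path never dereferences tp->rx_copy_thresh; other
 * architectures read the tunable threshold from the device structure
 * at runtime.
 */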
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{ }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },
	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },
	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test       (online) " },
	{ "link test        (online) " },
	{ "register test    (offline)" },
	{ "memory test      (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test   (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) {
		/* Non-posted methods */
		tp->write32(tp, off, val);
	} else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
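/*
 * Usage sketch (annotation, not in the original source): code that
 * toggles GPIO power through GRC_LOCAL_CTRL is expected to use the
 * waiting variant, e.g.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * which writes the register and, on the posted path, delays both
 * before and after the flushing read so the settling time is honored.
 */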
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
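/*
 * Annotation (not in the original source): the polling loop above
 * sleeps 8 usec per iteration, so delay_cnt = (usec_remaining >> 3) + 1
 * converts the remaining wait time into a loop count, rounding up so
 * the full firmware event timeout is always covered.
 */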
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
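/*
 * Annotation (not in the original source): this is the standard
 * 802.3x pause resolution for the 1000BASE-X advertisement layout.
 * Symmetric PAUSE on both ends enables flow control in both
 * directions; when only the ASYM bit is shared, the PAUSE bits decide
 * which single direction survives.  It mirrors what
 * mii_resolve_flowctrl_fdx() does for the copper MII bit layout.
 */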
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
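/* Zero out the per-channel test pattern memory written above. */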
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
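/* PHY reset workaround for 5703/5704/5705: force 1000-full master
 * mode, verify the DSP test patterns, then restore the original
 * MII_CTRL1000 and MII_TG3_EXT_CTRL state.
 */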
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
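/* Publish this function's driver-present/need-vaux bits in the status
 * word shared by all four PCI functions (the APE scratchpad on
 * 5717/5719, the CPMU driver status register otherwise) and return
 * everyone's combined view of it.
 */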
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
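/* Switch the board back to Vmain power.  On 5717/5719/5720 the power
 * switch GPIO lines are shared between functions, so the change is
 * serialized through the APE GPIO lock.
 */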
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
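/* Atmel AT45DB0X1B flash is page-addressed rather than linearly
 * addressed: the page index lives above bit ATMEL_AT45DB0X1B_PAGE_POS
 * and the offset within the page below it.  For example, assuming the
 * usual 264-byte page, linear offset 600 maps to page 2, byte 72.
 */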
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
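/* The backoff seed above is simply the six MAC address bytes summed
 * and masked, so different NICs tend to pick different half-duplex
 * retransmit backoff slots.
 */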
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
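/* Final power-down: run the WOL/clock preparation above, then arm
 * (or disarm) D3 wake and drop the device into D3hot.
 */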
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val) {
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
				tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			}
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
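/* Bring up (or re-verify) the copper link: reset buggy PHYs if
 * needed, wait for autoneg, then program MAC_MODE, flow control and
 * the carrier state to match the negotiated result.
 */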
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) +
					      PCI_EXP_LNKCTL, newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
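/* Software implementation of the 1000BASE-X (IEEE 802.3 clause 37)
 * autonegotiation state machine; it is called repeatedly, once per
 * tick, from fiber_autoneg() below.
 */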
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
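/* Magic init sequence for the BCM8002 SerDes PHY; the register
 * numbers and values below are opaque vendor settings.
 */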
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
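
/* Usage note (illustrative, not from the original source): because dst is
 * first advanced by off bytes, a call such as
 * tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0) lands each register at its own
 * offset within the dump buffer, i.e. regs[MAC_MODE / 4] holds MAC_MODE,
 * regs[MAC_MODE / 4 + 1] the next register, and so on, which keeps the
 * register dump below directly addressable by hardware offset.
 */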
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
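
/* For reference (editorial note, not from the original source): tg3_tx()
 * below takes this recovery path when a hardware tx completion points at
 * an empty slot (ri->skb == NULL) or overlaps a still-owned descriptor,
 * both symptoms of the mailbox-write re-ordering described above.
 */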
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
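
/* Worked example (illustrative numbers): with TG3_TX_RING_SIZE == 512,
 * tx_pending == 511, tx_prod == 5 and tx_cons == 510 after the producer
 * has wrapped, (5 - 510) & 511 == 7 descriptors are in flight, so
 * tg3_tx_avail() reports 511 - 7 == 504 free slots. The mask-and-subtract
 * works because the ring size is a power of two.
 */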
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	kfree(ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc(skb_size, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		kfree(data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
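
/* Sizing sketch (illustrative, not from the original source): the kmalloc()
 * above reserves SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) for the DMA
 * payload plus SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of tail room,
 * so that tg3_rx() can later hand the very same buffer to build_skb()
 * without copying; only the data_size bytes past the offset are DMA-mapped.
 */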
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
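
/* Example (illustrative, not from the original source): when a 1000-byte
 * frame arrives, the chip walks the TG3_BDINFO entries, picks the first
 * ring whose MAXLEN covers the frame (the standard ring rather than the
 * jumbo ring), DMAs the data to the host address that was posted in that
 * ring, and then writes the original opaque cookie plus length/status into
 * the next status ring entry for tg3_rx() below to consume.
 */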
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
						       PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
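
/* Typical usage (editorial sketch, mirroring tg3_reset_task() below): a
 * full hardware reset is bracketed as
 *
 *	tg3_full_lock(tp, 1);		// irq_sync != 0 also quiesces IRQs
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 *	err = tg3_init_hw(tp, 1);
 *	tg3_full_unlock(tp);
 *
 * whereas fast paths that only need mutual exclusion pass irq_sync == 0.
 */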
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
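
/* Worked example (illustrative numbers): with base == 0xffffff00 (the low
 * 32 bits of the mapping) and len == 0x400, base + len + 8 wraps to 0x308,
 * which is less than base, so the test flags the buffer as crossing a 4GB
 * boundary and the workaround path in tg3_tx_frag_set() /
 * tigon3_dma_hwbug_workaround() takes over.
 */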
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
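
/* Worked example (illustrative): DMA_BIT_MASK(40) == 0xffffffffff, so any
 * mapping at or above 1ULL << 40 makes mapping + len exceed the mask and
 * the packet is redirected through the bounce-copy workaround below.
 */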
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	if (!skb)
		return;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * the tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
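
/* Capacity sketch (illustrative, not from the original source): the
 * gso_segs * 3 estimate above assumes each resulting segment consumes at
 * worst roughly three descriptors (linear header plus fragments), so a TSO
 * skb that would split into 10 segments only proceeds once more than 30 tx
 * descriptors are free; otherwise the queue is stopped until tg3_tx()
 * reclaims enough space.
 */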
6626 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6627 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6629 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
6631 struct tg3
*tp
= netdev_priv(dev
);
6632 u32 len
, entry
, base_flags
, mss
, vlan
= 0;
6634 int i
= -1, would_hit_hwbug
;
6636 struct tg3_napi
*tnapi
;
6637 struct netdev_queue
*txq
;
6640 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
6641 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
6642 if (tg3_flag(tp
, ENABLE_TSS
))
6645 budget
= tg3_tx_avail(tnapi
);
6647 /* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP, 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	/* Now loop through additional data fragments, and queue them. */
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
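/*
 * Illustrative sketch (not part of the driver): the lost-wakeup-free
 * stop/wake protocol used at the end of tg3_start_xmit() above.  The
 * helper name tg3_tx_stop_wake_sketch() is hypothetical.  The queue is
 * stopped first, a full barrier then pairs with the consumer-index
 * update in the reclaim path (tg3_tx()), and only afterwards is free
 * space re-checked; this closes the race where reclaim frees
 * descriptors between our availability check and the stop.
 */
static inline void tg3_tx_stop_wake_sketch(struct tg3_napi *tnapi,
					   struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);
	smp_mb();	/* pairs with the index update in tg3_tx() */
	if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
		netif_tx_wake_queue(txq);
}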
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
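/* Note: MAC_MODE_PORT_INT_LPBACK folds the transmit data path back
 * into the receiver inside the MAC itself, so frames never reach the
 * PHY; the polarity and port-mode bits above only need to describe a
 * plausible link, not a real one.
 */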
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
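/* tg3_set_loopback() is reached through the netdev feature machinery;
 * from userspace the toggle is typically `ethtool -K <dev> loopback on`
 * (the NETIF_F_LOOPBACK feature bit), which lands here via
 * tg3_set_features() below.
 */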
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
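/* On the 5780 class, TSO and jumbo frames are mutually exclusive: the
 * TSO_CAPABLE flag is dropped when the MTU goes above ETH_DATA_LEN and
 * restored when it comes back down, with netdev_update_features()
 * called so tg3_fix_features() above can mask NETIF_F_ALL_TSO to
 * match the new MTU.
 */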
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
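/* If only some of the requested rx buffers could be allocated above,
 * the ring simply runs shallower: e.g. with rx_pending = 200 and an
 * allocation failure at i = 150, the standard ring continues with 150
 * posted buffers rather than failing the whole bring-up (a failure at
 * i = 0 is still fatal).
 */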
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					       sizeof(struct tg3_tx_ring_info) *
					       TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			if (tg3_flag(tp, ENABLE_RSS)) {
				tnapi->rx_rcb_prod_idx = NULL;
				break;
			}
			/* Fall through */
		case 1:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
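/* Status block producer index mapping set up above (non-RSS layout
 * field on the left, the rx return ring it serves on the right):
 *
 *	idx[0].rx_producer	-> return ring 1
 *	rx_jumbo_consumer	-> return ring 2
 *	reserved		-> return ring 3
 *	rx_mini_consumer	-> return ring 4
 */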
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
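/* With MAX_WAIT_CNT polls of 100 usec each, a block that never clears
 * its enable bit costs roughly 100 msec before tg3_stop_block() gives
 * up and (unless silent) complains.
 */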
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
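/* The shutdown order above is deliberate: the receive MAC and rx
 * blocks go quiet first so no new DMA is started, then the send-side
 * blocks, and only then the host coalescing engine, buffer manager and
 * memory arbiter that everything else depends on.
 */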
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliable (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
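/* tg3_chip_reset() in one breath: save the PCI config that the
 * core-clock reset will clobber, quiesce the irq handlers, hit
 * GRC_MISC_CFG_CORECLK_RESET, wait out the posted write, then restore
 * PCI state and re-probe what the firmware (ASF) is doing.
 */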
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
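/* __tg3_set_coalesce() is the sink for `ethtool -C`: rx-usecs,
 * rx-frames and rx-frames-irq map to the HOSTCC_RXCOL_TICKS,
 * HOSTCC_RXMAX_FRAMES and HOSTCC_RXCOAL_MAXF_INT registers (and their
 * per-vector VEC1 shadows, each 0x18 bytes apart), with the tx trio
 * handled the same way.
 */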
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
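/* Worked example for the thresholds above: with the default
 * rx_pending of 200, host_rep_thresh = max(200 / 8, 1) = 25, so the
 * standard ring is replenished once 25 buffers have been consumed,
 * unless the NIC-side BD cache headroom (bdcache_maxcnt / 2) is the
 * smaller of the two.
 */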
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] =
			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->irq_cnt <= 2) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
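/*
 * Illustrative sketch (not part of the driver): the nibble packing done
 * by tg3_rss_write_indir_tbl() above.  Eight 4-bit indirection entries
 * share one 32-bit register, first entry in the most significant
 * nibble; e.g. {1, 0, 2, 3, 0, 1, 2, 3} packs to 0x10230123.  The
 * helper name is hypothetical.
 */
static inline u32 tg3_rss_pack_sketch(const u8 *ent)
{
	u32 val = ent[0];
	int i;

	for (i = 1; i < 8; i++) {
		val <<= 4;
		val |= ent[i];
	}
	return val;
}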
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS))
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);

	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8979 tw32(MAC_RSS_HASH_KEY_1
, 0xe4ac62cc);
8980 tw32(MAC_RSS_HASH_KEY_2
, 0x50103a45);
8981 tw32(MAC_RSS_HASH_KEY_3
, 0x36621985);
8982 tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
8983 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
8984 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
8985 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
8986 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
8987 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
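    /* A fixed hash key makes the RSS flow-to-queue mapping deterministic
     * across resets and across identical NICs, so per-queue statistics
     * stay comparable between runs.
     */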
    tp->rx_mode = RX_MODE_ENABLE;
    if (tg3_flag(tp, 5755_PLUS))
        tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

    if (tg3_flag(tp, ENABLE_RSS))
        tp->rx_mode |= RX_MODE_RSS_ENABLE |
                       RX_MODE_RSS_ITBL_HASH_BITS_7 |
                       RX_MODE_RSS_IPV6_HASH_EN |
                       RX_MODE_RSS_TCP_IPV6_HASH_EN |
                       RX_MODE_RSS_IPV4_HASH_EN |
                       RX_MODE_RSS_TCP_IPV4_HASH_EN;

    tw32_f(MAC_RX_MODE, tp->rx_mode);
    udelay(10);

    tw32(MAC_LED_CTRL, tp->led_ctrl);

    tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
    if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
        tw32_f(MAC_RX_MODE, RX_MODE_RESET);
        udelay(10);
    }
    tw32_f(MAC_RX_MODE, tp->rx_mode);
    udelay(10);

    if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
            !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
            /* Set drive transmission level to 1.2V */
            /* only if the signal pre-emphasis bit is not set */
            val = tr32(MAC_SERDES_CFG);
            val &= 0xfffff000;
            val |= 0x880;
            tw32(MAC_SERDES_CFG, val);
        }
        if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
            tw32(MAC_SERDES_CFG, 0x616000);
    }

    /* Prevent chip from dropping frames when flow control
     * is enabled.
     */
    if (tg3_flag(tp, 57765_CLASS))
        val = 1;
    else
        val = 2;
    tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
        (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
        /* Use hardware link auto-negotiation */
        tg3_flag_set(tp, HW_AUTONEG);
    }

    if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
        u32 tmp;

        tmp = tr32(SERDES_RX_CTRL);
        tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
        tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
        tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
        tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
    }

    if (!tg3_flag(tp, USE_PHYLIB)) {
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
            tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
            tp->link_config.speed = tp->link_config.orig_speed;
            tp->link_config.duplex = tp->link_config.orig_duplex;
            tp->link_config.autoneg = tp->link_config.orig_autoneg;
        }

        err = tg3_setup_phy(tp, 0);
        if (err)
            return err;

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
            u32 tmp;

            /* Clear CRC stats. */
            if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
                tg3_writephy(tp, MII_TG3_TEST1,
                             tmp | MII_TG3_TEST1_CRC_EN);
                tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
            }
        }
    }

    __tg3_set_rx_mode(tp->dev);

    /* Initialize receive rules. */
    tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
    tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
    tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
    tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

    if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
        limit = 8;
    else
        limit = 16;
    if (tg3_flag(tp, ENABLE_ASF))
        limit -= 4;
    switch (limit) {
    case 16:
        tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
    case 15:
        tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
    case 14:
        tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
    case 13:
        tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
    case 12:
        tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
    case 11:
        tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
    case 10:
        tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
    case 9:
        tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
    case 8:
        tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
    case 7:
        tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
    case 6:
        tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
    case 5:
        tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
    case 4:
        /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
    case 3:
        /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
    case 2:
    case 1:

    default:
        break;
    }

    if (tg3_flag(tp, ENABLE_APE))
        /* Write our heartbeat update interval to APE. */
        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
                        APE_HOST_HEARTBEAT_INT_DISABLE);

    tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

    return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
    tg3_switch_clocks(tp);

    tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

    return tg3_reset_hw(tp, reset_phy);
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
            (PSTAT)->high += 1; \
} while (0)
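/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit
 * {high, low} software accumulator.  Unsigned wraparound of the low
 * word is detected by the post-add comparison: if the sum is smaller
 * than the value just added, the addition carried.  For example,
 * low = 0xffffff00 plus __val = 0x200 yields low = 0x100 < 0x200,
 * so high is incremented.
 */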
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
    struct tg3_hw_stats *sp = tp->hw_stats;

    if (!netif_carrier_ok(tp->dev))
        return;

    TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
    TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
    TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
    TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
    TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
    TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
    TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
    TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
    TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
    TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
    TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
    TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
    TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

    TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
    TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
    TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
    TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
    TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
    TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
    TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
    TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
    TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
    TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
    TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
    TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
    TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
    TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

    TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
    if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
        tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
        tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
    } else {
        u32 val = tr32(HOSTCC_FLOW_ATTN);
        val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
        if (val) {
            tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
            sp->rx_discards.low += val;
            if (sp->rx_discards.low < val)
                sp->rx_discards.high += 1;
        }
        sp->mbuf_lwm_thresh_hit = sp->rx_discards;
    }
    TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
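/* tg3_chk_missed_msi() works around MSIs that the chip occasionally
 * fails to deliver: if a vector still has work pending while its rx/tx
 * consumer indices have not moved since the previous timer tick, the
 * interrupt is assumed lost and the handler is invoked directly.
 */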
static void tg3_chk_missed_msi(struct tg3 *tp)
{
    u32 i;

    for (i = 0; i < tp->irq_cnt; i++) {
        struct tg3_napi *tnapi = &tp->napi[i];

        if (tg3_has_work(tnapi)) {
            if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
                tnapi->last_tx_cons == tnapi->tx_cons) {
                if (tnapi->chk_msi_cnt < 1) {
                    tnapi->chk_msi_cnt++;
                    return;
                }
                tg3_msi(0, tnapi);
            }
        }
        tnapi->chk_msi_cnt = 0;
        tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
        tnapi->last_tx_cons = tnapi->tx_cons;
    }
}
static void tg3_timer(unsigned long __opaque)
{
    struct tg3 *tp = (struct tg3 *) __opaque;

    if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
        goto restart_timer;

    spin_lock(&tp->lock);

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
        tg3_flag(tp, 57765_CLASS))
        tg3_chk_missed_msi(tp);

    if (!tg3_flag(tp, TAGGED_STATUS)) {
        /* All of this garbage is because when using non-tagged
         * IRQ status the mailbox/status_block protocol the chip
         * uses with the cpu is race prone.
         */
        if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
            tw32(GRC_LOCAL_CTRL,
                 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        } else {
            tw32(HOSTCC_MODE, tp->coalesce_mode |
                 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
        }

        if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
            spin_unlock(&tp->lock);
            tg3_reset_task_schedule(tp);
            goto restart_timer;
        }
    }

    /* This part only runs once per second. */
    if (!--tp->timer_counter) {
        if (tg3_flag(tp, 5705_PLUS))
            tg3_periodic_fetch_stats(tp);

        if (tp->setlpicnt && !--tp->setlpicnt)
            tg3_phy_eee_enable(tp);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
            u32 mac_stat;
            int phy_event;

            mac_stat = tr32(MAC_STATUS);

            phy_event = 0;
            if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                    phy_event = 1;
            } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                phy_event = 1;

            if (phy_event)
                tg3_setup_phy(tp, 0);
        } else if (tg3_flag(tp, POLL_SERDES)) {
            u32 mac_stat = tr32(MAC_STATUS);
            int need_setup = 0;

            if (netif_carrier_ok(tp->dev) &&
                (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                need_setup = 1;
            }
            if (!netif_carrier_ok(tp->dev) &&
                (mac_stat & (MAC_STATUS_PCS_SYNCED |
                             MAC_STATUS_SIGNAL_DET))) {
                need_setup = 1;
            }
            if (need_setup) {
                if (!tp->serdes_counter) {
                    tw32_f(MAC_MODE,
                           (tp->mac_mode &
                            ~MAC_MODE_PORT_MODE_MASK));
                    udelay(40);
                    tw32_f(MAC_MODE, tp->mac_mode);
                }
                tg3_setup_phy(tp, 0);
            }
        } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
                   tg3_flag(tp, 5780_CLASS)) {
            tg3_serdes_parallel_detect(tp);
        }

        tp->timer_counter = tp->timer_multiplier;
    }

    /* Heartbeat is only sent once every 2 seconds.
     *
     * The heartbeat is to tell the ASF firmware that the host
     * driver is still alive.  In the event that the OS crashes,
     * ASF needs to reset the hardware to free up the FIFO space
     * that may be filled with rx packets destined for the host.
     * If the FIFO is full, ASF will no longer function properly.
     *
     * Unintended resets have been reported on real time kernels
     * where the timer doesn't run on time.  Netpoll will also have
     * the same problem.
     *
     * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
     * to check the ring condition when the heartbeat is expiring
     * before doing the reset.  This will prevent most unintended
     * resets.
     */
    if (!--tp->asf_counter) {
        if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
            tg3_wait_for_event_ack(tp);

            tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                          FWCMD_NICDRV_ALIVE3);
            tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
            tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
                          TG3_FW_UPDATE_TIMEOUT_SEC);

            tg3_generate_fw_event(tp);
        }
        tp->asf_counter = tp->asf_multiplier;
    }

    spin_unlock(&tp->lock);

restart_timer:
    tp->timer.expires = jiffies + tp->timer_offset;
    add_timer(&tp->timer);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
    irq_handler_t fn;
    unsigned long flags;
    char *name;
    struct tg3_napi *tnapi = &tp->napi[irq_num];

    if (tp->irq_cnt == 1)
        name = tp->dev->name;
    else {
        name = &tnapi->irq_lbl[0];
        snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
        name[IFNAMSIZ-1] = 0;
    }

    if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
        fn = tg3_msi;
        if (tg3_flag(tp, 1SHOT_MSI))
            fn = tg3_msi_1shot;
        flags = 0;
    } else {
        fn = tg3_interrupt;
        if (tg3_flag(tp, TAGGED_STATUS))
            fn = tg3_interrupt_tagged;
        flags = IRQF_SHARED;
    }

    return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
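/* The interrupt self-test below has no direct way to observe delivery,
 * so it installs a minimal test ISR, forces a host-coalescing "now"
 * event, and then polls the vector's interrupt mailbox (and the PCI
 * INT mask bit in TG3PCI_MISC_HOST_CTRL) to see whether the interrupt
 * actually fired.
 */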
static int tg3_test_interrupt(struct tg3 *tp)
{
    struct tg3_napi *tnapi = &tp->napi[0];
    struct net_device *dev = tp->dev;
    int err, i, intr_ok = 0;
    u32 val;

    if (!netif_running(dev))
        return -ENODEV;

    tg3_disable_ints(tp);

    free_irq(tnapi->irq_vec, tnapi);

    /*
     * Turn off MSI one shot mode.  Otherwise this test has no
     * observable way to know whether the interrupt was delivered.
     */
    if (tg3_flag(tp, 57765_PLUS)) {
        val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
        tw32(MSGINT_MODE, val);
    }

    err = request_irq(tnapi->irq_vec, tg3_test_isr,
                      IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
    if (err)
        return err;

    tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
    tg3_enable_ints(tp);

    tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
           tnapi->coal_now);

    for (i = 0; i < 5; i++) {
        u32 int_mbox, misc_host_ctrl;

        int_mbox = tr32_mailbox(tnapi->int_mbox);
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

        if ((int_mbox != 0) ||
            (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
            intr_ok = 1;
            break;
        }

        if (tg3_flag(tp, 57765_PLUS) &&
            tnapi->hw_status->status_tag != tnapi->last_tag)
            tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

        msleep(10);
    }

    tg3_disable_ints(tp);

    free_irq(tnapi->irq_vec, tnapi);

    err = tg3_request_irq(tp, 0);

    if (err)
        return err;

    if (intr_ok) {
        /* Reenable MSI one shot mode. */
        if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
            val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
            tw32(MSGINT_MODE, val);
        }
        return 0;
    }

    return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
    int err;
    u16 pci_cmd;

    if (!tg3_flag(tp, USING_MSI))
        return 0;

    /* Turn off SERR reporting in case MSI terminates with Master
     * Abort.
     */
    pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
    pci_write_config_word(tp->pdev, PCI_COMMAND,
                          pci_cmd & ~PCI_COMMAND_SERR);

    err = tg3_test_interrupt(tp);

    pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

    if (!err)
        return 0;

    /* other failures */
    if (err != -EIO)
        return err;

    /* MSI test failed, go back to INTx mode */
    netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
                "to INTx mode. Please report this failure to the PCI "
                "maintainer and include system chipset information\n");

    free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

    pci_disable_msi(tp->pdev);

    tg3_flag_clear(tp, USING_MSI);
    tp->napi[0].irq_vec = tp->pdev->irq;

    err = tg3_request_irq(tp, 0);
    if (err)
        return err;

    /* Need to reset the chip because the MSI cycle may have terminated
     * with Master Abort.
     */
    tg3_full_lock(tp, 1);

    tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
    err = tg3_init_hw(tp, 1);

    tg3_full_unlock(tp);

    if (err)
        free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

    return err;
}
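/* Firmware blob layout assumed by tg3_request_firmware() below, per the
 * in-code comment: three big-endian header words (version, start
 * address, full image length including BSS) followed by the payload.
 * Hence the sanity check that fw_len covers at least fw->size - 12
 * bytes.
 */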
static int tg3_request_firmware(struct tg3 *tp)
{
    const __be32 *fw_data;

    if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
        netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
                   tp->fw_needed);
        return -ENOENT;
    }

    fw_data = (void *)tp->fw->data;

    /* Firmware blob starts with version numbers, followed by
     * start address and _full_ length including BSS sections
     * (which must be longer than the actual data, of course).
     */

    tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
    if (tp->fw_len < (tp->fw->size - 12)) {
        netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
                   tp->fw_len, tp->fw_needed);
        release_firmware(tp->fw);
        tp->fw = NULL;
        return -EINVAL;
    }

    /* We no longer need firmware; we have it. */
    tp->fw_needed = NULL;
    return 0;
}
static bool tg3_enable_msix(struct tg3 *tp)
{
    int i, rc;
    struct msix_entry msix_ent[tp->irq_max];

    tp->irq_cnt = num_online_cpus();
    if (tp->irq_cnt > 1) {
        /* We want as many rx rings enabled as there are cpus.
         * In multiqueue MSI-X mode, the first MSI-X vector
         * only deals with link interrupts, etc, so we add
         * one to the number of vectors we are requesting.
         */
        tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
    }

    for (i = 0; i < tp->irq_max; i++) {
        msix_ent[i].entry  = i;
        msix_ent[i].vector = 0;
    }

    rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
    if (rc < 0) {
        return false;
    } else if (rc != 0) {
        if (pci_enable_msix(tp->pdev, msix_ent, rc))
            return false;
        netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                      tp->irq_cnt, rc);
        tp->irq_cnt = rc;
    }

    for (i = 0; i < tp->irq_max; i++)
        tp->napi[i].irq_vec = msix_ent[i].vector;

    netif_set_real_num_tx_queues(tp->dev, 1);
    rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
    if (netif_set_real_num_rx_queues(tp->dev, rc)) {
        pci_disable_msix(tp->pdev);
        return false;
    }

    if (tp->irq_cnt > 1) {
        tg3_flag_set(tp, ENABLE_RSS);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
            tg3_flag_set(tp, ENABLE_TSS);
            netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
        }
    }

    return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
    if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
        !tg3_flag(tp, TAGGED_STATUS)) {
        /* All MSI supporting chips should support tagged
         * status.  Assert that this is the case.
         */
        netdev_warn(tp->dev,
                    "MSI without TAGGED_STATUS? Not using MSI\n");
        goto defcfg;
    }

    if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
        tg3_flag_set(tp, USING_MSIX);
    else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
        tg3_flag_set(tp, USING_MSI);

    if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
        u32 msi_mode = tr32(MSGINT_MODE);
        if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
            msi_mode |= MSGINT_MODE_MULTIVEC_EN;
        if (!tg3_flag(tp, 1SHOT_MSI))
            msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
    }
defcfg:
    if (!tg3_flag(tp, USING_MSIX)) {
        tp->irq_cnt = 1;
        tp->napi[0].irq_vec = tp->pdev->irq;
        netif_set_real_num_tx_queues(tp->dev, 1);
        netif_set_real_num_rx_queues(tp->dev, 1);
    }
}
static void tg3_ints_fini(struct tg3 *tp)
{
    if (tg3_flag(tp, USING_MSIX))
        pci_disable_msix(tp->pdev);
    else if (tg3_flag(tp, USING_MSI))
        pci_disable_msi(tp->pdev);
    tg3_flag_clear(tp, USING_MSI);
    tg3_flag_clear(tp, USING_MSIX);
    tg3_flag_clear(tp, ENABLE_RSS);
    tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
{
    struct tg3 *tp = netdev_priv(dev);
    int i, err;

    if (tp->fw_needed) {
        err = tg3_request_firmware(tp);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
            if (err)
                return err;
        } else if (err) {
            netdev_warn(tp->dev, "TSO capability disabled\n");
            tg3_flag_clear(tp, TSO_CAPABLE);
        } else if (!tg3_flag(tp, TSO_CAPABLE)) {
            netdev_notice(tp->dev, "TSO capability restored\n");
            tg3_flag_set(tp, TSO_CAPABLE);
        }
    }

    netif_carrier_off(tp->dev);

    err = tg3_power_up(tp);
    if (err)
        return err;

    tg3_full_lock(tp, 0);

    tg3_disable_ints(tp);
    tg3_flag_clear(tp, INIT_COMPLETE);

    tg3_full_unlock(tp);

    /*
     * Setup interrupts first so we know how
     * many NAPI resources to allocate
     */
    tg3_ints_init(tp);

    tg3_rss_check_indir_tbl(tp);

    /* The placement of this call is tied
     * to the setup and use of Host TX descriptors.
     */
    err = tg3_alloc_consistent(tp);
    if (err)
        goto err_out1;

    tg3_napi_init(tp);

    tg3_napi_enable(tp);

    for (i = 0; i < tp->irq_cnt; i++) {
        struct tg3_napi *tnapi = &tp->napi[i];
        err = tg3_request_irq(tp, i);
        if (err) {
            for (i--; i >= 0; i--) {
                tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
            }
            goto err_out2;
        }
    }

    tg3_full_lock(tp, 0);

    err = tg3_init_hw(tp, 1);
    if (err) {
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
    } else {
        if (tg3_flag(tp, TAGGED_STATUS) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
            !tg3_flag(tp, 57765_CLASS))
            tp->timer_offset = HZ;
        else
            tp->timer_offset = HZ / 10;

        BUG_ON(tp->timer_offset > HZ);

        tp->timer_counter = tp->timer_multiplier =
            (HZ / tp->timer_offset);
        tp->asf_counter = tp->asf_multiplier =
            ((HZ / tp->timer_offset) * 2);

        init_timer(&tp->timer);
        tp->timer.expires = jiffies + tp->timer_offset;
        tp->timer.data = (unsigned long) tp;
        tp->timer.function = tg3_timer;
    }

    tg3_full_unlock(tp);

    if (err)
        goto err_out3;

    if (tg3_flag(tp, USING_MSI)) {
        err = tg3_test_msi(tp);

        if (err) {
            tg3_full_lock(tp, 0);
            tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
            tg3_free_rings(tp);
            tg3_full_unlock(tp);

            goto err_out2;
        }
    }

    if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
        u32 val = tr32(PCIE_TRANSACTION_CFG);

        tw32(PCIE_TRANSACTION_CFG,
             val | PCIE_TRANS_CFG_1SHOT_MSI);
    }

    tg3_phy_start(tp);

    tg3_full_lock(tp, 0);

    add_timer(&tp->timer);
    tg3_flag_set(tp, INIT_COMPLETE);
    tg3_enable_ints(tp);

    tg3_full_unlock(tp);

    netif_tx_start_all_queues(dev);

    /*
     * Reset loopback feature if it was turned on while the device was
     * down; make sure that it's installed properly now.
     */
    if (dev->features & NETIF_F_LOOPBACK)
        tg3_set_loopback(dev, dev->features);

    return 0;

err_out3:
    for (i = tp->irq_cnt - 1; i >= 0; i--) {
        struct tg3_napi *tnapi = &tp->napi[i];
        free_irq(tnapi->irq_vec, tnapi);
    }

err_out2:
    tg3_napi_disable(tp);
    tg3_napi_fini(tp);
    tg3_free_consistent(tp);

err_out1:
    tg3_ints_fini(tp);
    tg3_frob_aux_power(tp, false);
    pci_set_power_state(tp->pdev, PCI_D3hot);
    return err;
}
static int tg3_close(struct net_device *dev)
{
    int i;
    struct tg3 *tp = netdev_priv(dev);

    tg3_napi_disable(tp);
    tg3_reset_task_cancel(tp);

    netif_tx_stop_all_queues(dev);

    del_timer_sync(&tp->timer);

    tg3_phy_stop(tp);

    tg3_full_lock(tp, 1);

    tg3_disable_ints(tp);

    tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
    tg3_free_rings(tp);
    tg3_flag_clear(tp, INIT_COMPLETE);

    tg3_full_unlock(tp);

    for (i = tp->irq_cnt - 1; i >= 0; i--) {
        struct tg3_napi *tnapi = &tp->napi[i];
        free_irq(tnapi->irq_vec, tnapi);
    }

    tg3_ints_fini(tp);

    /* Clear stats across close / open calls */
    memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
    memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

    tg3_napi_fini(tp);

    tg3_free_consistent(tp);

    tg3_power_down(tp);

    netif_carrier_off(tp->dev);

    return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
    return ((u64)val->high << 32) | ((u64)val->low);
}
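/* get_stat64() above is the read side of TG3_STAT_ADD32: it reassembles
 * the {high, low} accumulator into a plain u64.
 *
 * On 5700/5701 copper parts the FCS/CRC error count is not taken from
 * the hardware statistics block; tg3_calc_crc_errors() below instead
 * reads the PHY's clear-on-read receive error counter (re-arming it
 * via MII_TG3_TEST1_CRC_EN on every read) and accumulates the result
 * in tp->phy_crc_errors.
 */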
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
    struct tg3_hw_stats *hw_stats = tp->hw_stats;

    if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
        (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
        u32 val;

        if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
            tg3_writephy(tp, MII_TG3_TEST1,
                         val | MII_TG3_TEST1_CRC_EN);
            tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
        } else
            val = 0;

        tp->phy_crc_errors += val;

        return tp->phy_crc_errors;
    }

    return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
    estats->member = old_estats->member + \
                     get_stat64(&hw_stats->member)
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
    struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
    struct tg3_hw_stats *hw_stats = tp->hw_stats;

    ESTAT_ADD(rx_octets);
    ESTAT_ADD(rx_fragments);
    ESTAT_ADD(rx_ucast_packets);
    ESTAT_ADD(rx_mcast_packets);
    ESTAT_ADD(rx_bcast_packets);
    ESTAT_ADD(rx_fcs_errors);
    ESTAT_ADD(rx_align_errors);
    ESTAT_ADD(rx_xon_pause_rcvd);
    ESTAT_ADD(rx_xoff_pause_rcvd);
    ESTAT_ADD(rx_mac_ctrl_rcvd);
    ESTAT_ADD(rx_xoff_entered);
    ESTAT_ADD(rx_frame_too_long_errors);
    ESTAT_ADD(rx_jabbers);
    ESTAT_ADD(rx_undersize_packets);
    ESTAT_ADD(rx_in_length_errors);
    ESTAT_ADD(rx_out_length_errors);
    ESTAT_ADD(rx_64_or_less_octet_packets);
    ESTAT_ADD(rx_65_to_127_octet_packets);
    ESTAT_ADD(rx_128_to_255_octet_packets);
    ESTAT_ADD(rx_256_to_511_octet_packets);
    ESTAT_ADD(rx_512_to_1023_octet_packets);
    ESTAT_ADD(rx_1024_to_1522_octet_packets);
    ESTAT_ADD(rx_1523_to_2047_octet_packets);
    ESTAT_ADD(rx_2048_to_4095_octet_packets);
    ESTAT_ADD(rx_4096_to_8191_octet_packets);
    ESTAT_ADD(rx_8192_to_9022_octet_packets);

    ESTAT_ADD(tx_octets);
    ESTAT_ADD(tx_collisions);
    ESTAT_ADD(tx_xon_sent);
    ESTAT_ADD(tx_xoff_sent);
    ESTAT_ADD(tx_flow_control);
    ESTAT_ADD(tx_mac_errors);
    ESTAT_ADD(tx_single_collisions);
    ESTAT_ADD(tx_mult_collisions);
    ESTAT_ADD(tx_deferred);
    ESTAT_ADD(tx_excessive_collisions);
    ESTAT_ADD(tx_late_collisions);
    ESTAT_ADD(tx_collide_2times);
    ESTAT_ADD(tx_collide_3times);
    ESTAT_ADD(tx_collide_4times);
    ESTAT_ADD(tx_collide_5times);
    ESTAT_ADD(tx_collide_6times);
    ESTAT_ADD(tx_collide_7times);
    ESTAT_ADD(tx_collide_8times);
    ESTAT_ADD(tx_collide_9times);
    ESTAT_ADD(tx_collide_10times);
    ESTAT_ADD(tx_collide_11times);
    ESTAT_ADD(tx_collide_12times);
    ESTAT_ADD(tx_collide_13times);
    ESTAT_ADD(tx_collide_14times);
    ESTAT_ADD(tx_collide_15times);
    ESTAT_ADD(tx_ucast_packets);
    ESTAT_ADD(tx_mcast_packets);
    ESTAT_ADD(tx_bcast_packets);
    ESTAT_ADD(tx_carrier_sense_errors);
    ESTAT_ADD(tx_discards);
    ESTAT_ADD(tx_errors);

    ESTAT_ADD(dma_writeq_full);
    ESTAT_ADD(dma_write_prioq_full);
    ESTAT_ADD(rxbds_empty);
    ESTAT_ADD(rx_discards);
    ESTAT_ADD(rx_errors);
    ESTAT_ADD(rx_threshold_hit);

    ESTAT_ADD(dma_readq_full);
    ESTAT_ADD(dma_read_prioq_full);
    ESTAT_ADD(tx_comp_queue_full);

    ESTAT_ADD(ring_set_send_prod_index);
    ESTAT_ADD(ring_status_update);
    ESTAT_ADD(nic_irqs);
    ESTAT_ADD(nic_avoided_irqs);
    ESTAT_ADD(nic_tx_threshold_hit);

    ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
    struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
    struct tg3_hw_stats *hw_stats = tp->hw_stats;

    stats->rx_packets = old_stats->rx_packets +
        get_stat64(&hw_stats->rx_ucast_packets) +
        get_stat64(&hw_stats->rx_mcast_packets) +
        get_stat64(&hw_stats->rx_bcast_packets);

    stats->tx_packets = old_stats->tx_packets +
        get_stat64(&hw_stats->tx_ucast_packets) +
        get_stat64(&hw_stats->tx_mcast_packets) +
        get_stat64(&hw_stats->tx_bcast_packets);

    stats->rx_bytes = old_stats->rx_bytes +
        get_stat64(&hw_stats->rx_octets);
    stats->tx_bytes = old_stats->tx_bytes +
        get_stat64(&hw_stats->tx_octets);

    stats->rx_errors = old_stats->rx_errors +
        get_stat64(&hw_stats->rx_errors);
    stats->tx_errors = old_stats->tx_errors +
        get_stat64(&hw_stats->tx_errors) +
        get_stat64(&hw_stats->tx_mac_errors) +
        get_stat64(&hw_stats->tx_carrier_sense_errors) +
        get_stat64(&hw_stats->tx_discards);

    stats->multicast = old_stats->multicast +
        get_stat64(&hw_stats->rx_mcast_packets);
    stats->collisions = old_stats->collisions +
        get_stat64(&hw_stats->tx_collisions);

    stats->rx_length_errors = old_stats->rx_length_errors +
        get_stat64(&hw_stats->rx_frame_too_long_errors) +
        get_stat64(&hw_stats->rx_undersize_packets);

    stats->rx_over_errors = old_stats->rx_over_errors +
        get_stat64(&hw_stats->rxbds_empty);
    stats->rx_frame_errors = old_stats->rx_frame_errors +
        get_stat64(&hw_stats->rx_align_errors);
    stats->tx_aborted_errors = old_stats->tx_aborted_errors +
        get_stat64(&hw_stats->tx_discards);
    stats->tx_carrier_errors = old_stats->tx_carrier_errors +
        get_stat64(&hw_stats->tx_carrier_sense_errors);

    stats->rx_crc_errors = old_stats->rx_crc_errors +
        tg3_calc_crc_errors(tp);

    stats->rx_missed_errors = old_stats->rx_missed_errors +
        get_stat64(&hw_stats->rx_discards);

    stats->rx_dropped = tp->rx_dropped;
    stats->tx_dropped = tp->tx_dropped;
}
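/* calc_crc() below is the standard bit-reflected (LSB-first) Ethernet
 * CRC-32, computed bitwise with the reversed polynomial 0xedb88320 and
 * a final inversion.  It appears to equal ~ether_crc_le(len, buf),
 * though the driver keeps its own local copy.  __tg3_set_rx_mode()
 * uses it to hash multicast addresses into the MAC hash filter.
 */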
static inline u32 calc_crc(unsigned char *buf, int len)
{
    u32 reg;
    u32 tmp;
    int j, k;

    reg = 0xffffffff;

    for (j = 0; j < len; j++) {
        reg ^= buf[j];

        for (k = 0; k < 8; k++) {
            tmp = reg & 0x01;

            reg >>= 1;

            if (tmp)
                reg ^= 0xedb88320;
        }
    }

    return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
    /* accept or reject all multicast frames */
    tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
    tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
    tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
    tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
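/* The multicast filter below is a 128-bin hash: the low 7 bits of the
 * inverted CRC select one of 128 bits spread across the four 32-bit
 * MAC_HASH_REG registers (bits 6:5 pick the register, bits 4:0 the bit
 * within it).  Hash collisions may let extra multicast frames through;
 * the stack discards those in software.
 */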
static void __tg3_set_rx_mode(struct net_device *dev)
{
    struct tg3 *tp = netdev_priv(dev);
    u32 rx_mode;

    rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                              RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
    /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
     * flag clear.
     */
    if (!tg3_flag(tp, ENABLE_ASF))
        rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

    if (dev->flags & IFF_PROMISC) {
        /* Promiscuous mode. */
        rx_mode |= RX_MODE_PROMISC;
    } else if (dev->flags & IFF_ALLMULTI) {
        /* Accept all multicast. */
        tg3_set_multi(tp, 1);
    } else if (netdev_mc_empty(dev)) {
        /* Reject all multicast. */
        tg3_set_multi(tp, 0);
    } else {
        /* Accept one or more multicast(s). */
        struct netdev_hw_addr *ha;
        u32 mc_filter[4] = { 0, };
        u32 regidx;
        u32 bit;
        u32 crc;

        netdev_for_each_mc_addr(ha, dev) {
            crc = calc_crc(ha->addr, ETH_ALEN);
            bit = ~crc & 0x7f;
            regidx = (bit & 0x60) >> 5;
            bit &= 0x1f;
            mc_filter[regidx] |= (1 << bit);
        }

        tw32(MAC_HASH_REG_0, mc_filter[0]);
        tw32(MAC_HASH_REG_1, mc_filter[1]);
        tw32(MAC_HASH_REG_2, mc_filter[2]);
        tw32(MAC_HASH_REG_3, mc_filter[3]);
    }

    if (rx_mode != tp->rx_mode) {
        tp->rx_mode = rx_mode;
        tw32_f(MAC_RX_MODE, rx_mode);
        udelay(10);
    }
}
static void tg3_set_rx_mode(struct net_device *dev)
{
    struct tg3 *tp = netdev_priv(dev);

    if (!netif_running(dev))
        return;

    tg3_full_lock(tp, 0);
    __tg3_set_rx_mode(dev);
    tg3_full_unlock(tp);
}
static int tg3_get_regs_len(struct net_device *dev)
{
    return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
                         struct ethtool_regs *regs, void *_p)
{
    struct tg3 *tp = netdev_priv(dev);

    regs->version = 0;

    memset(_p, 0, TG3_REG_BLK_SIZE);

    if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
        return;

    tg3_full_lock(tp, 0);

    tg3_dump_legacy_regs(tp, (u32 *)_p);

    tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
    struct tg3 *tp = netdev_priv(dev);

    return tp->nvram_size;
}
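/* NVRAM reads below are always 32-bit aligned words, so unaligned
 * ethtool requests are split into head/body/tail.  For example, with
 * offset = 1 and len = 10: the head reads the word at 0 and copies
 * bytes 1-3, the body loop reads the aligned word at 4, and the tail
 * reads the word at 8 and copies bytes 8-10.
 */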
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
    struct tg3 *tp = netdev_priv(dev);
    int ret;
    u8  *pd;
    u32 i, offset, len, b_offset, b_count;
    __be32 val;

    if (tg3_flag(tp, NO_NVRAM))
        return -EINVAL;

    if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
        return -EAGAIN;

    offset = eeprom->offset;
    len = eeprom->len;
    eeprom->len = 0;

    eeprom->magic = TG3_EEPROM_MAGIC;

    if (offset & 3) {
        /* adjustments to start on required 4 byte boundary */
        b_offset = offset & 3;
        b_count = 4 - b_offset;
        if (b_count > len) {
            /* i.e. offset=1 len=2 */
            b_count = len;
        }
        ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
        if (ret)
            return ret;
        memcpy(data, ((char *)&val) + b_offset, b_count);
        len -= b_count;
        offset += b_count;
        eeprom->len += b_count;
    }

    /* read bytes up to the last 4 byte boundary */
    pd = &data[eeprom->len];
    for (i = 0; i < (len - (len & 3)); i += 4) {
        ret = tg3_nvram_read_be32(tp, offset + i, &val);
        if (ret) {
            eeprom->len += i;
            return ret;
        }
        memcpy(pd + i, &val, 4);
    }
    eeprom->len += i;

    if (len & 3) {
        /* read last bytes not ending on 4 byte boundary */
        pd = &data[eeprom->len];
        b_count = len & 3;
        b_offset = offset + len - b_count;
        ret = tg3_nvram_read_be32(tp, b_offset, &val);
        if (ret)
            return ret;
        memcpy(pd, &val, b_count);
        eeprom->len += b_count;
    }
    return 0;
}
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
    struct tg3 *tp = netdev_priv(dev);
    int ret;
    u32 offset, len, b_offset, odd_len;
    u8 *buf;
    __be32 start, end;

    if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
        return -EAGAIN;

    if (tg3_flag(tp, NO_NVRAM) ||
        eeprom->magic != TG3_EEPROM_MAGIC)
        return -EINVAL;

    offset = eeprom->offset;
    len = eeprom->len;

    if ((b_offset = (offset & 3))) {
        /* adjustments to start on required 4 byte boundary */
        ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
        if (ret)
            return ret;
        len += b_offset;
        offset &= ~3;
        if (len < 4)
            len = 4;
    }

    odd_len = 0;
    if (len & 3) {
        /* adjustments to end on required 4 byte boundary */
        odd_len = 1;
        len = (len + 3) & ~3;
        ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
        if (ret)
            return ret;
    }

    buf = data;
    if (b_offset || odd_len) {
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;
        if (b_offset)
            memcpy(buf, &start, 4);
        if (odd_len)
            memcpy(buf+len-4, &end, 4);
        memcpy(buf + b_offset, data, eeprom->len);
    }

    ret = tg3_nvram_write_block(tp, offset, len, buf);

    if (buf != data)
        kfree(buf);

    return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct tg3 *tp = netdev_priv(dev);

    if (tg3_flag(tp, USE_PHYLIB)) {
        struct phy_device *phydev;
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
            return -EAGAIN;
        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        return phy_ethtool_gset(phydev, cmd);
    }

    cmd->supported = (SUPPORTED_Autoneg);

    if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
        cmd->supported |= (SUPPORTED_1000baseT_Half |
                           SUPPORTED_1000baseT_Full);

    if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
        cmd->supported |= (SUPPORTED_100baseT_Half |
                           SUPPORTED_100baseT_Full |
                           SUPPORTED_10baseT_Half |
                           SUPPORTED_10baseT_Full |
                           SUPPORTED_TP);
        cmd->port = PORT_TP;
    } else {
        cmd->supported |= SUPPORTED_FIBRE;
        cmd->port = PORT_FIBRE;
    }

    cmd->advertising = tp->link_config.advertising;
    if (tg3_flag(tp, PAUSE_AUTONEG)) {
        if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
            if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
                cmd->advertising |= ADVERTISED_Pause;
            } else {
                cmd->advertising |= ADVERTISED_Pause |
                                    ADVERTISED_Asym_Pause;
            }
        } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
            cmd->advertising |= ADVERTISED_Asym_Pause;
        }
    }
    if (netif_running(dev) && netif_carrier_ok(dev)) {
        ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
        cmd->duplex = tp->link_config.active_duplex;
        cmd->lp_advertising = tp->link_config.rmt_adv;
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
            if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
                cmd->eth_tp_mdix = ETH_TP_MDI_X;
            else
                cmd->eth_tp_mdix = ETH_TP_MDI;
        }
    } else {
        ethtool_cmd_speed_set(cmd, SPEED_INVALID);
        cmd->duplex = DUPLEX_INVALID;
        cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
    }
    cmd->phy_address = tp->phy_addr;
    cmd->transceiver = XCVR_INTERNAL;
    cmd->autoneg = tp->link_config.autoneg;
    cmd->maxtxpkt = 0;
    cmd->maxrxpkt = 0;
    return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct tg3 *tp = netdev_priv(dev);
    u32 speed = ethtool_cmd_speed(cmd);

    if (tg3_flag(tp, USE_PHYLIB)) {
        struct phy_device *phydev;
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
            return -EAGAIN;
        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        return phy_ethtool_sset(phydev, cmd);
    }

    if (cmd->autoneg != AUTONEG_ENABLE &&
        cmd->autoneg != AUTONEG_DISABLE)
        return -EINVAL;

    if (cmd->autoneg == AUTONEG_DISABLE &&
        cmd->duplex != DUPLEX_FULL &&
        cmd->duplex != DUPLEX_HALF)
        return -EINVAL;

    if (cmd->autoneg == AUTONEG_ENABLE) {
        u32 mask = ADVERTISED_Autoneg |
                   ADVERTISED_Pause |
                   ADVERTISED_Asym_Pause;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
            mask |= ADVERTISED_1000baseT_Half |
                    ADVERTISED_1000baseT_Full;

        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
            mask |= ADVERTISED_100baseT_Half |
                    ADVERTISED_100baseT_Full |
                    ADVERTISED_10baseT_Half |
                    ADVERTISED_10baseT_Full |
                    ADVERTISED_TP;
        else
            mask |= ADVERTISED_FIBRE;

        if (cmd->advertising & ~mask)
            return -EINVAL;

        mask &= (ADVERTISED_1000baseT_Half |
                 ADVERTISED_1000baseT_Full |
                 ADVERTISED_100baseT_Half |
                 ADVERTISED_100baseT_Full |
                 ADVERTISED_10baseT_Half |
                 ADVERTISED_10baseT_Full);

        cmd->advertising &= mask;
    } else {
        if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
            if (speed != SPEED_1000)
                return -EINVAL;

            if (cmd->duplex != DUPLEX_FULL)
                return -EINVAL;
        } else {
            if (speed != SPEED_100 &&
                speed != SPEED_10)
                return -EINVAL;
        }
    }

    tg3_full_lock(tp, 0);

    tp->link_config.autoneg = cmd->autoneg;
    if (cmd->autoneg == AUTONEG_ENABLE) {
        tp->link_config.advertising = (cmd->advertising |
                                       ADVERTISED_Autoneg);
        tp->link_config.speed = SPEED_INVALID;
        tp->link_config.duplex = DUPLEX_INVALID;
    } else {
        tp->link_config.advertising = 0;
        tp->link_config.speed = speed;
        tp->link_config.duplex = cmd->duplex;
    }

    tp->link_config.orig_speed = tp->link_config.speed;
    tp->link_config.orig_duplex = tp->link_config.duplex;
    tp->link_config.orig_autoneg = tp->link_config.autoneg;

    if (netif_running(dev))
        tg3_setup_phy(tp, 1);

    tg3_full_unlock(tp);

    return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct tg3 *tp = netdev_priv(dev);

    strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
    strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
    strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
    strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
    struct tg3 *tp = netdev_priv(dev);

    if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
        wol->supported = WAKE_MAGIC;
    else
        wol->supported = 0;
    wol->wolopts = 0;
    if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
        wol->wolopts = WAKE_MAGIC;
    memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
    struct tg3 *tp = netdev_priv(dev);
    struct device *dp = &tp->pdev->dev;

    if (wol->wolopts & ~WAKE_MAGIC)
        return -EINVAL;
    if ((wol->wolopts & WAKE_MAGIC) &&
        !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
        return -EINVAL;

    device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

    spin_lock_bh(&tp->lock);
    if (device_may_wakeup(dp))
        tg3_flag_set(tp, WOL_ENABLE);
    else
        tg3_flag_clear(tp, WOL_ENABLE);
    spin_unlock_bh(&tp->lock);

    return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
    struct tg3 *tp = netdev_priv(dev);
    return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
    struct tg3 *tp = netdev_priv(dev);
    tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
    struct tg3 *tp = netdev_priv(dev);
    int r;

    if (!netif_running(dev))
        return -EAGAIN;

    if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
        return -EINVAL;

    if (tg3_flag(tp, USE_PHYLIB)) {
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
            return -EAGAIN;
        r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
    } else {
        u32 bmcr;

        spin_lock_bh(&tp->lock);
        r = -EINVAL;
        tg3_readphy(tp, MII_BMCR, &bmcr);
        if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
            ((bmcr & BMCR_ANENABLE) ||
             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
            tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                       BMCR_ANENABLE);
            r = 0;
        }
        spin_unlock_bh(&tp->lock);
    }

    return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
    struct tg3 *tp = netdev_priv(dev);

    ering->rx_max_pending = tp->rx_std_ring_mask;
    if (tg3_flag(tp, JUMBO_RING_ENABLE))
        ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
    else
        ering->rx_jumbo_max_pending = 0;

    ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

    ering->rx_pending = tp->rx_pending;
    if (tg3_flag(tp, JUMBO_RING_ENABLE))
        ering->rx_jumbo_pending = tp->rx_jumbo_pending;
    else
        ering->rx_jumbo_pending = 0;

    ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
    struct tg3 *tp = netdev_priv(dev);
    int i, irq_sync = 0, err = 0;

    if ((ering->rx_pending > tp->rx_std_ring_mask) ||
        (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
        (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
        (ering->tx_pending <= MAX_SKB_FRAGS) ||
        (tg3_flag(tp, TSO_BUG) &&
         (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
        return -EINVAL;

    if (netif_running(dev)) {
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);
        irq_sync = 1;
    }

    tg3_full_lock(tp, irq_sync);

    tp->rx_pending = ering->rx_pending;

    if (tg3_flag(tp, MAX_RXPEND_64) &&
        tp->rx_pending > 63)
        tp->rx_pending = 63;
    tp->rx_jumbo_pending = ering->rx_jumbo_pending;

    for (i = 0; i < tp->irq_max; i++)
        tp->napi[i].tx_pending = ering->tx_pending;

    if (netif_running(dev)) {
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_restart_hw(tp, 1);
        if (!err)
            tg3_netif_start(tp);
    }

    tg3_full_unlock(tp);

    if (irq_sync && !err)
        tg3_phy_start(tp);

    return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
    struct tg3 *tp = netdev_priv(dev);

    epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

    if (tp->link_config.flowctrl & FLOW_CTRL_RX)
        epause->rx_pause = 1;
    else
        epause->rx_pause = 0;

    if (tp->link_config.flowctrl & FLOW_CTRL_TX)
        epause->tx_pause = 1;
    else
        epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
    struct tg3 *tp = netdev_priv(dev);
    int err = 0;

    if (tg3_flag(tp, USE_PHYLIB)) {
        u32 newadv;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!(phydev->supported & SUPPORTED_Pause) ||
            (!(phydev->supported & SUPPORTED_Asym_Pause) &&
             (epause->rx_pause != epause->tx_pause)))
            return -EINVAL;

        tp->link_config.flowctrl = 0;
        if (epause->rx_pause) {
            tp->link_config.flowctrl |= FLOW_CTRL_RX;

            if (epause->tx_pause) {
                tp->link_config.flowctrl |= FLOW_CTRL_TX;
                newadv = ADVERTISED_Pause;
            } else
                newadv = ADVERTISED_Pause |
                         ADVERTISED_Asym_Pause;
        } else if (epause->tx_pause) {
            tp->link_config.flowctrl |= FLOW_CTRL_TX;
            newadv = ADVERTISED_Asym_Pause;
        } else
            newadv = 0;

        if (epause->autoneg)
            tg3_flag_set(tp, PAUSE_AUTONEG);
        else
            tg3_flag_clear(tp, PAUSE_AUTONEG);

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
            u32 oldadv = phydev->advertising &
                         (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
            if (oldadv != newadv) {
                phydev->advertising &=
                    ~(ADVERTISED_Pause |
                      ADVERTISED_Asym_Pause);
                phydev->advertising |= newadv;
                if (phydev->autoneg) {
                    /*
                     * Always renegotiate the link to
                     * inform our link partner of our
                     * flow control settings, even if the
                     * flow control is forced.  Let
                     * tg3_adjust_link() do the final
                     * flow control setup.
                     */
                    return phy_start_aneg(phydev);
                }
            }

            if (!epause->autoneg)
                tg3_setup_flow_control(tp, 0, 0);
        } else {
            tp->link_config.orig_advertising &=
                ~(ADVERTISED_Pause |
                  ADVERTISED_Asym_Pause);
            tp->link_config.orig_advertising |= newadv;
        }
    } else {
        int irq_sync = 0;

        if (netif_running(dev)) {
            tg3_netif_stop(tp);
            irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        if (epause->autoneg)
            tg3_flag_set(tp, PAUSE_AUTONEG);
        else
            tg3_flag_clear(tp, PAUSE_AUTONEG);
        if (epause->rx_pause)
            tp->link_config.flowctrl |= FLOW_CTRL_RX;
        else
            tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
        if (epause->tx_pause)
            tp->link_config.flowctrl |= FLOW_CTRL_TX;
        else
            tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

        if (netif_running(dev)) {
            tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
            err = tg3_restart_hw(tp, 1);
            if (!err)
                tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);
    }

    return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
    switch (sset) {
    case ETH_SS_TEST:
        return TG3_NUM_TEST;
    case ETH_SS_STATS:
        return TG3_NUM_STATS;
    default:
        return -EOPNOTSUPP;
    }
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                         u32 *rules __always_unused)
{
    struct tg3 *tp = netdev_priv(dev);

    if (!tg3_flag(tp, SUPPORT_MSIX))
        return -EOPNOTSUPP;

    switch (info->cmd) {
    case ETHTOOL_GRXRINGS:
        if (netif_running(tp->dev))
            info->data = tp->irq_cnt;
        else {
            info->data = num_online_cpus();
            if (info->data > TG3_IRQ_MAX_VECS_RSS)
                info->data = TG3_IRQ_MAX_VECS_RSS;
        }

        /* The first interrupt vector only
         * handles link interrupts.
         */
        info->data -= 1;
        return 0;

    default:
        return -EOPNOTSUPP;
    }
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
    u32 size = 0;
    struct tg3 *tp = netdev_priv(dev);

    if (tg3_flag(tp, SUPPORT_MSIX))
        size = TG3_RSS_INDIR_TBL_SIZE;

    return size;
}
static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
    struct tg3 *tp = netdev_priv(dev);
    int i;

    for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
        indir[i] = tp->rss_ind_tbl[i];

    return 0;
}
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
    struct tg3 *tp = netdev_priv(dev);
    size_t i;

    for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
        tp->rss_ind_tbl[i] = indir[i];

    if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
        return 0;

    /* It is legal to write the indirection
     * table while the device is running.
     */
    tg3_full_lock(tp, 0);
    tg3_rss_write_indir_tbl(tp);
    tg3_full_unlock(tp);

    return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
    switch (stringset) {
    case ETH_SS_STATS:
        memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
        break;
    case ETH_SS_TEST:
        memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
        break;
    default:
        WARN_ON(1);     /* we need a WARN() */
        break;
    }
}
static int tg3_set_phys_id(struct net_device *dev,
                           enum ethtool_phys_id_state state)
{
    struct tg3 *tp = netdev_priv(dev);

    if (!netif_running(tp->dev))
        return -EAGAIN;

    switch (state) {
    case ETHTOOL_ID_ACTIVE:
        return 1;       /* cycle on/off once per second */

    case ETHTOOL_ID_ON:
        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
             LED_CTRL_1000MBPS_ON |
             LED_CTRL_100MBPS_ON |
             LED_CTRL_10MBPS_ON |
             LED_CTRL_TRAFFIC_OVERRIDE |
             LED_CTRL_TRAFFIC_BLINK |
             LED_CTRL_TRAFFIC_LED);
        break;

    case ETHTOOL_ID_OFF:
        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
             LED_CTRL_TRAFFIC_OVERRIDE);
        break;

    case ETHTOOL_ID_INACTIVE:
        tw32(MAC_LED_CTRL, tp->led_ctrl);
        break;
    }

    return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
                                  struct ethtool_stats *estats, u64 *tmp_stats)
{
    struct tg3 *tp = netdev_priv(dev);

    tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
    int i;
    __be32 *buf;
    u32 offset = 0, len = 0;
    u32 magic, val;

    if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
        return NULL;

    if (magic == TG3_EEPROM_MAGIC) {
        for (offset = TG3_NVM_DIR_START;
             offset < TG3_NVM_DIR_END;
             offset += TG3_NVM_DIRENT_SIZE) {
            if (tg3_nvram_read(tp, offset, &val))
                return NULL;

            if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
                TG3_NVM_DIRTYPE_EXTVPD)
                break;
        }

        if (offset != TG3_NVM_DIR_END) {
            len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
            if (tg3_nvram_read(tp, offset + 4, &offset))
                return NULL;

            offset = tg3_nvram_logical_addr(tp, offset);
        }
    }

    if (!offset || !len) {
        offset = TG3_NVM_VPD_OFF;
        len = TG3_NVM_VPD_LEN;
    }

    buf = kmalloc(len, GFP_KERNEL);
    if (buf == NULL)
        return NULL;

    if (magic == TG3_EEPROM_MAGIC) {
        for (i = 0; i < len; i += 4) {
            /* The data is in little-endian format in NVRAM.
             * Use the big-endian read routines to preserve
             * the byte order as it exists in NVRAM.
             */
            if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
                goto error;
        }
    } else {
        u8 *ptr;
        ssize_t cnt;
        unsigned int pos = 0;

        ptr = (u8 *)&buf[0];
        for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
            cnt = pci_read_vpd(tp->pdev, pos,
                               len - pos, ptr);
            if (cnt == -ETIMEDOUT || cnt == -EINTR)
                cnt = 0;
            else if (cnt < 0)
                goto error;
        }
        if (pos != len)
            goto error;
    }

    *vpdlen = len;

    return buf;

error:
    kfree(buf);
    return NULL;
}
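/* tg3_test_nvram() below understands three NVRAM layouts:
 *  - legacy EEPROM images (TG3_EEPROM_MAGIC), validated with the two
 *    CRC-32 checksums at offsets 0x10 and 0xfc plus the VPD checksum;
 *  - selfboot format 1 images, validated with an 8-bit additive
 *    checksum over the image (skipping the MBA word on rev 2);
 *  - selfboot "HW" images, where four bytes of the 0x20-byte block
 *    carry per-byte parity bits and each data byte together with its
 *    stored parity bit must have odd total weight.
 */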
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
    u32 csum, magic, len;
    __be32 *buf;
    int i, j, k, err = 0, size;

    if (tg3_flag(tp, NO_NVRAM))
        return 0;

    if (tg3_nvram_read(tp, 0, &magic) != 0)
        return -EIO;

    if (magic == TG3_EEPROM_MAGIC)
        size = NVRAM_TEST_SIZE;
    else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
        if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
            TG3_EEPROM_SB_FORMAT_1) {
            switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
            case TG3_EEPROM_SB_REVISION_0:
                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                break;
            case TG3_EEPROM_SB_REVISION_2:
                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                break;
            case TG3_EEPROM_SB_REVISION_3:
                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                break;
            case TG3_EEPROM_SB_REVISION_4:
                size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
                break;
            case TG3_EEPROM_SB_REVISION_5:
                size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
                break;
            case TG3_EEPROM_SB_REVISION_6:
                size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
                break;
            default:
                return -EIO;
            }
        } else
            return 0;
    } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
        size = NVRAM_SELFBOOT_HW_SIZE;
    else
        return -EIO;

    buf = kmalloc(size, GFP_KERNEL);
    if (buf == NULL)
        return -ENOMEM;

    err = -EIO;
    for (i = 0, j = 0; i < size; i += 4, j++) {
        err = tg3_nvram_read_be32(tp, i, &buf[j]);
        if (err)
            break;
    }
    if (i < size)
        goto out;

    /* Selfboot format */
    magic = be32_to_cpu(buf[0]);
    if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
        TG3_EEPROM_MAGIC_FW) {
        u8 *buf8 = (u8 *) buf, csum8 = 0;

        if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
            TG3_EEPROM_SB_REVISION_2) {
            /* For rev 2, the csum doesn't include the MBA. */
            for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                csum8 += buf8[i];
            for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                csum8 += buf8[i];
        } else {
            for (i = 0; i < size; i++)
                csum8 += buf8[i];
        }

        if (csum8 == 0) {
            err = 0;
            goto out;
        }

        err = -EIO;
        goto out;
    }

    if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
        TG3_EEPROM_MAGIC_HW) {
        u8 data[NVRAM_SELFBOOT_DATA_SIZE];
        u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
        u8 *buf8 = (u8 *) buf;

        /* Separate the parity bits and the data bytes. */
        for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
            if ((i == 0) || (i == 8)) {
                int l;
                u8 msk;

                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                    parity[k++] = buf8[i] & msk;
            } else if (i == 16) {
                int l;
                u8 msk;

                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                    parity[k++] = buf8[i] & msk;
            } else if (i == 24) {
                int l;
                u8 msk;

                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                    parity[k++] = buf8[i] & msk;
            } else
                data[j++] = buf8[i];
        }

        err = -EIO;
        for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
            u8 hw8 = hweight8(data[i]);

            if ((hw8 & 0x1) && parity[i])
                goto out;
            else if (!(hw8 & 0x1) && !parity[i])
                goto out;
        }
        err = 0;
        goto out;
    }

    err = -EIO;

    /* Bootstrap checksum at offset 0x10 */
    csum = calc_crc((unsigned char *) buf, 0x10);
    if (csum != le32_to_cpu(buf[0x10/4]))
        goto out;

    /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
    csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
    if (csum != le32_to_cpu(buf[0xfc/4]))
        goto out;

    kfree(buf);

    buf = tg3_vpd_readblock(tp, &len);
    if (!buf)
        return -ENOMEM;

    i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
    if (i > 0) {
        j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
        if (j < 0)
            goto out;

        if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
            goto out;

        i += PCI_VPD_LRDT_TAG_SIZE;
        j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
                                      PCI_VPD_RO_KEYWORD_CHKSUM);
        if (j > 0) {
            u8 csum8 = 0;

            j += PCI_VPD_INFO_FLD_HDR_SIZE;

            for (i = 0; i <= j; i++)
                csum8 += ((u8 *)buf)[i];

            if (csum8)
                goto out;
        }
    }

    err = 0;

out:
    kfree(buf);
    return err;
}
#define TG3_SERDES_TIMEOUT_SEC  2
#define TG3_COPPER_TIMEOUT_SEC  6

static int tg3_test_link(struct tg3 *tp)
{
    int i, max;

    if (!netif_running(tp->dev))
        return -ENODEV;

    if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
        max = TG3_SERDES_TIMEOUT_SEC;
    else
        max = TG3_COPPER_TIMEOUT_SEC;

    for (i = 0; i < max; i++) {
        if (netif_carrier_ok(tp->dev))
            return 0;

        if (msleep_interruptible(1000))
            break;
    }

    return -EIO;
}
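/* In the register test below, each table entry pairs a read_mask of
 * bits with fixed or self-clearing values that must read back as given
 * with a write_mask of bits the test is allowed to toggle and verify;
 * the TG3_FL_5705 / TG3_FL_NOT_5705 / TG3_FL_NOT_5788 / TG3_FL_NOT_5750
 * flags gate entries to the matching chip families.
 */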
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
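/* Note (added commentary, not in the original source): each reg_tbl entry
 * splits a register into read-only bits (read_mask) and read/write bits
 * (write_mask).  The loop above passes only when, after writing zero and
 * then writing all mask bits, the read-only bits never move and the
 * read/write bits follow the written value exactly.
 */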
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
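/* Note (added commentary): each mem_tbl_* list is terminated by the
 * { 0xffffffff, 0x00000 } sentinel rather than an explicit length, so the
 * walk in tg3_test_memory() simply stops at the first 0xffffffff offset.
 */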
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
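/* Added commentary on tg3_tso_header (not in the original source): the
 * first two bytes are the IPv4 ethertype (0x0800); the next 20 bytes are
 * an IPv4 header with src 10.0.0.1, dst 10.0.0.2 and a zeroed total-length
 * field that tg3_run_loopback() fills in; the remainder is a TCP header
 * whose data offset of 8 words covers TG3_TSO_TCP_HDR_LEN plus
 * TG3_TSO_TCP_OPT_LEN bytes of options (two NOPs and a timestamp option).
 */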
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));

		mss = TG3_TSO_MSS;
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
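/* Added commentary: tg3_test_loopback() reports one u64 per loopback mode,
 * data[0] for MAC loopback, data[1] for internal PHY loopback and data[2]
 * for external PHY loopback, each holding an OR of the three
 * TG3_*_LOOPBACK_FAILED bits above for the standard, jumbo and TSO cases.
 */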
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}

	return 0;
}
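/* Added usage note: these fields are the ones ethtool -C manipulates, e.g.
 * "ethtool -C eth0 rx-usecs 20 rx-frames 5" lands in ec->rx_coalesce_usecs
 * and ec->rx_max_coalesced_frames above ("eth0" is just an example name).
 */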
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
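/* Added worked example for the byteswap above: if the dword read back from
 * offset 0xf0 ends with 0x0004 in its low 16 bits, swab16() yields 0x0400
 * (1024 decimal), so tp->nvram_size becomes 1024 * 1024 bytes.
 */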
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
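/* Added commentary: the odd 264- and 528-byte cases correspond to Atmel
 * DataFlash parts whose pages carry 8 or 16 extra bytes beyond a power of
 * two; the per-chip probe routines below use these two sizes to decide
 * whether the NO_NVRAM_ADDR_TRANS flag must be set.
 */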
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
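/* Added commentary on the unbuffered path above: because the part cannot
 * buffer partial pages, each iteration performs a full read-modify-write
 * cycle: read the whole page, merge the caller's bytes, issue WREN, erase
 * the page, issue WREN again, then program it back one dword at a time
 * with NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing.
 */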
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}

	return NULL;
}
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
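/* Note on the loop above: 100 iterations with udelay(10) between
 * OTP_STATUS polls gives exactly the 1 ms budget the comment promises
 * (100 * 10 us = 1000 us).  -EBUSY is returned only if the command
 * never signals OTP_STATUS_CMD_DONE within that window.
 */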
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
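/* Illustrative sketch, not used by the driver: the halfword merge
 * performed by tg3_read_otp_phycfg() above.  Because the 32-bit gphy
 * config straddles an OTP word boundary, its top half arrives in the
 * low 16 bits of the first read and its bottom half in the high 16
 * bits of the second read.  The helper name is hypothetical.
 */
static inline u32 tg3_otp_merge_halves(u32 thalf, u32 bhalf)
{
	/* low 16 of thalf -> bits 31:16, high 16 of bhalf -> bits 15:0 */
	return ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);
}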
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
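/* The probe below reads MII_BMSR twice before deciding to skip the
 * PHY reset: the BMSR link-status bit is latched low per the MII
 * specification, so the first read clears any stale link-down
 * indication and only the second read reflects the current state.
 */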
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the hard-coded table based PHY_ID, and failing that,
		 * the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
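/* tg3_read_vpd() below walks the PCI VPD read-only section: it first
 * locates the large-resource RO tag and bounds the section by the LRDT
 * size, then checks the MFR_ID keyword ("1028" is Dell's PCI vendor ID
 * in ASCII, whose VENDOR0 keyword carries a firmware string), and
 * finally reads the PN (part number) keyword.  Every offset is
 * validated against the block end before it is used for a copy.
 */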
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;

			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
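	/* The build number maps to a single-letter suffix appended
	 * below: build 1 becomes 'a', build 26 becomes 'z'.  Anything
	 * above 26 (or a minor over two digits) cannot be represented,
	 * hence the guard that follows.
	 */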
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;

		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
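/* Taken together, the helpers above compose tp->fw_ver in a fixed
 * order: any VPD-derived string first, then the bootcode version
 * (full NVRAM, self-boot "sb", or hardware self-boot "hwsb" format),
 * and finally a management firmware suffix (", <ver>" for ASF, or
 * " NCSI/DASH v..." when the APE is present).
 */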
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
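/* Host bridges on the list above are known to reorder posted writes
 * to the mailbox registers.  tg3_get_invariants() below checks the
 * list with pci_dev_present(), and when a match is found (and the
 * device is not PCIe) sets MBOX_WRITE_REORDER so every mailbox write
 * is read back to force ordering.
 */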
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time and so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}
	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tg3_flag_set(tp, 5780_CLASS);
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
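	/* Note: the *_PLUS flags above form a strict superset chain --
	 * 5717_PLUS and 57765_CLASS imply 57765_PLUS, which implies
	 * 5755_PLUS, which implies 5750_PLUS, which implies 5705_PLUS.
	 * Later code can therefore test the widest applicable flag
	 * instead of enumerating ASIC revisions.
	 */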
	/* Determine TSO capabilities */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tp->fw_needed) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	tp->irq_max = 1;
	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
			tg3_rss_init_dflt_indir_tbl(tp);
		}
	}
	if (tg3_flag(tp, 5755_PLUS))
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
			int readrq = pcie_get_readrq(tp->pdev);
			if (readrq > 2048)
				pcie_set_readrq(tp->pdev, 2048);
		}

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}
	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);
	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}
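	/* Register access below is dispatched through per-device function
	 * pointers (tp->read32, tp->write32, and the mailbox variants).
	 * The defaults are the plain MMIO helpers; each workaround swaps
	 * in an indirect or flushing variant rather than sprinkling
	 * conditionals through the fast path.
	 */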
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
			tp->pci_fn = tp->pci_fn ? 1 : 0;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
		}
	}
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, CPMU_PRESENT);
	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;
	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;
	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}
	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);
	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
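/* Illustrative sketch, not used by the driver: how
 * tg3_get_device_address() below unpacks the SRAM mailbox words into
 * a 6-byte MAC address.  The 0x484b signature checked in the high
 * word is ASCII "HK".  The helper name is hypothetical.
 */
static inline void tg3_mac_from_mbox(u32 hi, u32 lo, u8 *addr)
{
	addr[0] = (hi >>  8) & 0xff;
	addr[1] = (hi >>  0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >>  8) & 0xff;
	addr[5] = (lo >>  0) & 0xff;
}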
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2
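/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so a raw value of
 * e.g. 16 means a 64-byte cache line; tg3_calc_dma_bndry() below
 * multiplies by 4 to get bytes and treats 0 as "unknown" (1024).  The
 * two goals above select whether DMA bursts should stop at every cache
 * line or only at multi-line boundaries, depending on the host
 * architecture.
 */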
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size,
				     int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
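/* tg3_test_dma() below probes for the 5700/5701 write-DMA bug: it
 * programs the boundary bits for the maximum write burst, DMAs a
 * counting pattern to NIC SRAM and back, and on corruption retries
 * with the write boundary clamped to 16 bytes.  The chipset table
 * above catches hosts known to expose the bug even when the test
 * happens to pass.
 */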
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
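/* Select buffer-manager watermark defaults by chip generation (57765+,
 * 5705+ with a 5906 override, or the original 5700-class values); the
 * DMA low/high watermarks are common to all chips.
 */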
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
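/* Map the hardware PHY ID to the human-readable name used in the probe
 * banner.
 */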
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
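/* Format the bus description ("PCI Express", "PCIX:133MHz", ...) for the
 * probe banner; PCI-X speed is inferred from the CLOCK_CTRL divider bits,
 * with the 5704 CIOBE board always reported as 133MHz.
 */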
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
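/* Seed the ethtool coalescing parameters with driver defaults.  Chips
 * that clear ticks on BD events take the *_CLRTCKS variants, and on
 * 5705+ parts the per-IRQ and statistics-block fields are zeroed (those
 * settings are not used on such chips).
 */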
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
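/* ndo_get_stats64 hook: snapshot the counters under tp->lock, falling
 * back to the last saved statistics when the hardware stats block is
 * unavailable.
 */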
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tp->hw_stats)
		return &tp->net_stats_prev;

	spin_lock_bh(&tp->lock);
	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
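/* Probe path: enable and map the device, fetch chip invariants, size the
 * DMA masks, build the feature set, verify the DMA engine, lay out the
 * per-vector mailboxes, and register the net device.  Failures unwind
 * through the err_out_* labels in reverse order of setup.
 */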
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down; otherwise the DMA self test will enable WDMAC and we'll
	 * see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
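		/* Vector 0 keeps the mailbox addresses computed above; later
		 * vectors step rcvmbx through the register file and bounce
		 * sndmbx between the low and high halves of each 8-byte
		 * send-mailbox register pair.
		 */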
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
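/* Teardown mirrors probe: cancel deferred work, detach phylib, unregister
 * the net device, then release mappings and PCI resources in reverse
 * order of acquisition.
 */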
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
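/* System sleep hooks: suspend halts the chip and prepares the power
 * state, restarting the hardware if that fails; resume reinitializes the
 * chip and rearms the timer.  Both are no-ops while the interface is down.
 */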
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
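/* PCI error recovery: error_detected() quiesces the driver, slot_reset()
 * re-enables and restores the device after the bus reset, and resume()
 * restarts traffic once the recovery core signals the all-clear.
 */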
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);