/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
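
/* Illustrative usage (comment only, not from the original source): the
 * wrappers above paste the short flag name onto the TG3_FLAG_ prefix, so
 * a capability check reads like
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))
 *		...enable jumbo frame support...
 *
 * while tg3_flag_set()/tg3_flag_clear() update the same bitmap atomically
 * via set_bit()/clear_bit().
 */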
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			122
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"December 7, 2011"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
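
/* Worked example (comment only, not from the original source): with
 * TG3_TX_RING_SIZE a compile-time power of two, NEXT_TX() wraps with a
 * single AND rather than a hardware modulo, e.g.
 *
 *	NEXT_TX(510) == 511
 *	NEXT_TX(511) == 0	since (511 + 1) & (512 - 1) == 0
 *
 * which is exactly the '& (foo - 1)' rewrite described above.
 */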
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
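
/* Hypothetical sketch (not from the original source) of how the rx path
 * typically consumes the threshold above:
 *
 *	if (len > TG3_RX_COPY_THRESH(tp))
 *		...unmap the buffer and hand it to the stack as-is...
 *	else
 *		...memcpy into a small fresh skb and recycle the buffer...
 *
 * On 5701/PCIX parts the per-device threshold is raised so unaligned
 * buffers are always double copied, which is the workaround described
 * in the comment above.
 */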
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
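
/* Illustrative note (comment only, not from the original source): tw32()
 * is a plain posted write, tw32_f() reads the register back to flush the
 * write, and tw32_wait_f() additionally honors the settle delay needed by
 * registers like GRC_LOCAL_CTRL, e.g.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */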
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
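
/* Illustrative pairing (hypothetical call site, not from the original
 * source): the two helpers above give dword access to NIC SRAM through
 * the memory window, e.g. polling and rewriting the firmware mailbox:
 *
 *	u32 sig;
 *
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &sig);
 *	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
 *		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
 */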
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
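
/* Usage sketch (mirrors the pattern in tg3_ape_send_event() below; shown
 * here for clarity, not an addition to the driver API):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;		// grant not won within ~1 ms
 *	...touch APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */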
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
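
/* Illustrative read-modify-write sketch built on the two helpers above
 * (hypothetical call site; real callers appear throughout this file):
 *
 *	u32 bmcr;
 *
 *	if (!tg3_readphy(tp, MII_BMCR, &bmcr))
 *		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
 */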
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
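
/* Typical wrap pattern for DSP accesses (this is exactly how
 * tg3_phy_apply_otp() later in this file uses these macros; shown here
 * for clarity):
 *
 *	if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *		...tg3_phydsp_write() / tg3_phydsp_read() calls...
 *		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *	}
 */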
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
*bp
, int mii_id
, int reg
)
1175 struct tg3
*tp
= bp
->priv
;
1178 spin_lock_bh(&tp
->lock
);
1180 if (tg3_readphy(tp
, reg
, &val
))
1183 spin_unlock_bh(&tp
->lock
);
1188 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1190 struct tg3
*tp
= bp
->priv
;
1193 spin_lock_bh(&tp
->lock
);
1195 if (tg3_writephy(tp
, reg
, val
))
1198 spin_unlock_bh(&tp
->lock
);
1203 static int tg3_mdio_reset(struct mii_bus
*bp
)
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
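
/* Worked example (comment only, not from the original source): if both
 * ends advertise symmetric pause (lcladv = rmtadv = ADVERTISE_1000XPAUSE),
 * the resolver above returns FLOW_CTRL_TX | FLOW_CTRL_RX.  If the local
 * side advertises only ADVERTISE_1000XPSE_ASYM while the remote advertises
 * both bits, the asymmetric branch is taken and the result is FLOW_CTRL_TX.
 */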
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2153 static int tg3_wait_macro_done(struct tg3
*tp
)
2160 if (!tg3_readphy(tp
, MII_TG3_DSP_CONTROL
, &tmp32
)) {
2161 if ((tmp32
& 0x1000) == 0)
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
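
/*
 * Illustrative summary (comment only, not in the original source): the
 * polarity rule above collapses to
 *   LED_CTRL_MODE_PHY_2  -> set MAC_MODE_LINK_POLARITY at any speed
 *   BCM5411 PHY          -> set it at any speed except 10 Mbps
 *   any other PHY        -> set it only at 10 Mbps
 * which is why the WOL and copper link-setup paths below re-evaluate
 * it whenever the active speed changes.
 */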
static int tg3_setup_phy(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
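
/*
 * Illustrative sketch, not part of the original driver: the locking
 * discipline the helpers above expect.  Every raw NVRAM access is
 * bracketed by the software arbitration lock and the access-enable
 * bit, exactly as tg3_nvram_read() below does; the function name is
 * hypothetical.
 */
static int tg3_nvram_access_pattern_example(struct tg3 *tp)
{
	int ret = tg3_nvram_lock(tp);	/* take SWARB request 1 */
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);	/* set ACCESS_ENABLE on 5750+ */

	/* ... issue NVRAM_ADDR/NVRAM_CMD traffic here ... */

	tg3_disable_nvram_access(tp);
	tg3_nvram_unlock(tp);		/* drop SWARB when count hits 0 */
	return 0;
}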
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
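
/*
 * Worked example (illustrative comment only, not in the original
 * source), assuming the usual 264-byte page size for these Atmel
 * parts and ATMEL_AT45DB0X1B_PAGE_POS == 9: a linear offset of 1000
 * maps to
 *     page   = 1000 / 264 = 3
 *     offset = 1000 % 264 = 208
 *     phys   = (3 << 9) + 208 = 0x6d0
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */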
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);

	if (!res)
		*val = cpu_to_be32(v);

	return res;
}
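
/*
 * Illustrative sketch, not part of the original driver: reading a
 * block of NVRAM in bytestream (big-endian) order with the helper
 * above, the way the VPD and MAC-address probe paths elsewhere in
 * this file consume it.  The function name and the 16-byte length
 * are hypothetical.
 */
static int tg3_nvram_read_block_example(struct tg3 *tp, u32 offset, u8 *buf)
{
	int i;

	for (i = 0; i < 16; i += sizeof(__be32)) {
		__be32 v;
		int err = tg3_nvram_read_be32(tp, offset + i, &v);

		if (err)
			return err;
		memcpy(buf + i, &v, sizeof(v));	/* preserve bytestream order */
	}
	return 0;
}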
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
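
	return 0;
}

/*
 * Illustrative sketch, not part of the original driver: the firmware
 * blob layout the comment above describes.  Word 0 holds the version,
 * word 1 the load/start address, word 2 the text+bss length, and the
 * payload begins at word 3; fw_len is taken from the file size minus
 * the 12-byte header.  The function name is hypothetical.
 */
static void tg3_fw_blob_parse_example(const struct firmware *fw,
				      struct fw_info *info)
{
	const __be32 *fw_data = (const __be32 *)fw->data;

	info->fw_base = be32_to_cpu(fw_data[1]);	/* start address */
	info->fw_len  = fw->size - 12;			/* skip 3-word header */
	info->fw_data = &fw_data[3];			/* contiguous payload */
}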
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
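
/*
 * Worked example (illustrative comment only, not in the original
 * source): for the station address 00:10:18:aa:bb:cc the packing
 * above yields
 *     addr_high = (0x00 << 8) | 0x10                    = 0x00000010
 *     addr_low  = (0x18 << 24) | (0xaa << 16) |
 *                 (0xbb << 8)  |  0xcc                  = 0x18aabbcc
 * i.e. the high register carries the first two octets and the low
 * register the remaining four, most significant octet first.
 */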
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) +
					      PCI_EXP_LNKCTL, newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
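
/* Illustrative sketch, not part of the driver: the local_adv/remote_adv
 * pairs collected above are handed to tg3_setup_flow_control(), which
 * applies the standard 802.3x pause resolution for 1000Base-X.  A
 * self-contained userspace model of that resolution (bit values chosen
 * to match the MII 1000Base-X advertisement layout; names hypothetical):
 */
#if 0	/* example only */
#define X_PAUSE		0x0080	/* symmetric pause */
#define X_ASYM_PAUSE	0x0100	/* asymmetric pause direction */

#define FC_TX		0x1
#define FC_RX		0x2

static unsigned int resolve_1000x_pause(unsigned int lcl, unsigned int rmt)
{
	if (lcl & rmt & X_PAUSE)
		return FC_TX | FC_RX;	/* both ends symmetric */
	if (lcl & rmt & X_ASYM_PAUSE) {
		if (lcl & X_PAUSE)
			return FC_RX;	/* we honor pause frames only */
		if (rmt & X_PAUSE)
			return FC_TX;	/* we send pause frames only */
	}
	return 0;
}
#endif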
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
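
/* Illustrative note, not part of the driver: because tg3_rd32_loop()
 * advances dst by the register offset before copying, the caller's
 * buffer ends up indexed by absolute register offset rather than being
 * compacted.  E.g. off = 0x400, len = 0x20 fills dst[0x100]..dst[0x107]
 * and leaves earlier slots untouched, which lets the hex dump in
 * tg3_dump_state() below print real register addresses.
 */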
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
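
/* Illustrative sketch, not part of the driver: the computation above
 * relies on TG3_TX_RING_SIZE being a power of two, so masking the
 * (prod - cons) difference yields the in-flight count even after the
 * producer index wraps.  A userspace model with a 512-entry ring
 * (helper name hypothetical):
 */
#if 0	/* example only */
static unsigned int ring_avail(unsigned int pending,
			       unsigned int prod, unsigned int cons)
{
	/* e.g. prod = 5 after wrapping, cons = 500:
	 * (5 - 500) & 511 = 17 descriptors still in flight.
	 */
	return pending - ((prod - cons) & (512 - 1));
}
#endif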
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	kfree(ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc(skb_size, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		kfree(data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
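
/* Illustrative note, not part of the driver: skb_size above reserves room
 * for the headroom-plus-packet area and for the skb_shared_info that
 * build_skb() in tg3_rx() will later place at the tail of this same
 * buffer, each rounded up by SKB_DATA_ALIGN.  The exact headroom depends
 * on TG3_RX_OFFSET(tp), which varies with the chip's DMA alignment
 * requirements.
 */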
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
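
/* Illustrative sketch, not part of the driver: a minimal userspace model
 * of the return-ring walk above.  Entries are consumed until the software
 * index catches the hardware-reported producer index, masking at the ring
 * boundary (the 1024-entry ring and helper name are hypothetical):
 */
#if 0	/* example only */
static unsigned int consume_return_ring(unsigned short hw_idx,
					unsigned short *sw_idx)
{
	unsigned int received = 0;

	while (*sw_idx != hw_idx) {
		/* ... process the descriptor at *sw_idx here ... */
		received++;
		*sw_idx = (*sw_idx + 1) & (1024 - 1);
	}
	return received;
}
#endif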
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
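
/* Illustrative note, not part of the driver: the cpycnt logic above never
 * copies across the ring wrap in a single step.  With a 512-entry ring,
 * source consumer at 500 and a producer that has wrapped to 10, the else
 * branch yields cpycnt = 512 - 500 = 12; after those entries move, the
 * consumer masks back to 0 and the next loop pass transfers the remaining
 * 10.  The min() against the destination producer keeps the copy from
 * overrunning dpr in the same way.
 */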
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}

/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
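
/* Illustrative note, not part of the driver: tg3_4g_overflow_test() spots
 * a DMA span that would cross a 4GB boundary by checking for 32-bit
 * wrap-around.  E.g. mapping = 0xffffff00, len = 0x200: base + len + 8
 * truncates to 0x108, which is < base, so the test fires.  The extra 8
 * bytes appear to widen the window as a guard band for the hardware
 * erratum being worked around.
 */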
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}

static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
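
/* Illustrative note, not part of the driver: the dma_limit loop above also
 * dodges the short-DMA erratum while splitting.  With a hypothetical
 * dma_limit of 4096 and len = 4100, a naive split would leave a 4-byte
 * tail; instead the last full chunk is halved, emitting 2048- and
 * 2052-byte descriptors so no fragment ends up 8 bytes or smaller.
 */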
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
6618 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6619 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6621 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
6623 struct tg3
*tp
= netdev_priv(dev
);
6624 u32 len
, entry
, base_flags
, mss
, vlan
= 0;
6626 int i
= -1, would_hit_hwbug
;
6628 struct tg3_napi
*tnapi
;
6629 struct netdev_queue
*txq
;
6632 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
6633 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
6634 if (tg3_flag(tp
, ENABLE_TSS
))
6637 budget
= tg3_tx_avail(tnapi
);
6639 /* We are running in BH disabled context with netif_tx_lock
6640 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	/* Now loop through additional data fragments, and queue them. */
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
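/* A minimal sketch (not part of the driver) of the producer-side
 * stop/wake protocol used above: the queue is stopped *before* the
 * free-descriptor count is re-read, with a full barrier in between,
 * so the reclaim path in tg3_tx() cannot observe a stale "queue
 * awake" state and miss a wakeup.  All names below are illustrative
 * assumptions, not tg3 symbols.
 */
static inline bool example_tx_ring_should_wake(u32 prod, u32 cons,
					       u32 ring_size, u32 thresh)
{
	/* Free entries in a power-of-2 ring with one slot reserved. */
	u32 avail = (ring_size - 1) - ((prod - cons) & (ring_size - 1));

	return avail > thresh;
}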
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
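/* Illustrative sketch: the consumer-to-producer walks above rely on the
 * ring sizes being powers of two, so "advance and wrap" is a single AND
 * with the ring mask rather than a modulo.  With a 512-entry ring the
 * mask is 511 and index 511 + 1 wraps to 0.  Hypothetical helper, shown
 * only to make that arithmetic explicit; not driver code.
 */
static inline u32 example_ring_next(u32 idx, u32 ring_mask)
{
	return (idx + 1) & ring_mask;	/* ring_mask == ring size - 1 */
}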
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					sizeof(struct tg3_tx_ring_info) *
					TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
						       &tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
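/* Sketch of the posted-write flush idiom discussed in tg3_chip_reset():
 * a PCI write can sit in a posting buffer indefinitely, so the portable
 * way to force it out is to follow it with a read from the same device.
 * Illustrative only; the driver itself uses a config-space read above
 * because MMIO is unusable while the core is held in reset.
 */
static inline u32 example_flush_posted_write(void __iomem *reg, u32 val)
{
	writel(val, reg);
	return readl(reg);	/* read-back forces the write to complete */
}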
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
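/* Sketch of the address split performed by tg3_set_bdinfo(): the NIC
 * takes a 64-bit host DMA address as two 32-bit writes, high word
 * first.  Hypothetical helpers, shown only for illustration.
 */
static inline u32 example_dma_addr_high(u64 mapping)
{
	return (u32)(mapping >> 32);		/* upper 32 bits */
}

static inline u32 example_dma_addr_low(u64 mapping)
{
	return (u32)(mapping & 0xffffffff);	/* lower 32 bits */
}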
static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
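/* Sketch of the per-vector register addressing used above: each extra
 * MSI-X vector owns a bank of host-coalescing registers 0x18 bytes past
 * the previous one, so register N for vector (i + 1) lives at
 * BASE_VEC1 + i * 0x18.  Hypothetical helper, for illustration only.
 */
static inline u32 example_hostcc_vec_reg(u32 base_vec1, int i)
{
	return base_vec1 + i * 0x18;	/* 0x18-byte stride per vector */
}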
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8; /* 64 bit */
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
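/* Sketch of the replenish threshold computed above: the host-side
 * threshold is pending / 8 (at least 1), and the programmed value is
 * further capped by half the on-chip BD cache so the NIC never asks
 * for more buffers than its cache can hold.  Illustrative restatement
 * only, not driver code.
 */
static inline u32 example_rx_replenish_thresh(u32 pending, u32 bdcache_maxcnt)
{
	u32 host_thresh = pending / 8 ? pending / 8 : 1;

	return min(bdcache_maxcnt / 2, host_thresh);
}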
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] =
			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->irq_cnt <= 2) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
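/* Sketch of the packing done by tg3_rss_write_indir_tbl(): eight 4-bit
 * indirection entries are packed most-significant-first into each
 * 32-bit register, e.g. entries {1,2,3,4,5,6,7,8} become 0x12345678.
 * Hypothetical helper, shown only for illustration.
 */
static inline u32 example_rss_pack8(const u8 *tbl)
{
	u32 val = 0;
	int i;

	for (i = 0; i < 8; i++)
		val = (val << 4) | (tbl[i] & 0xf);	/* MSB-first */

	return val;
}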
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS))
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32))
			tg3_write_mem(tp, i, 0);
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
8969 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
8970 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
8971 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
8972 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
8973 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
8976 tp
->rx_mode
= RX_MODE_ENABLE
;
8977 if (tg3_flag(tp
, 5755_PLUS
))
8978 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
8980 if (tg3_flag(tp
, ENABLE_RSS
))
8981 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
8982 RX_MODE_RSS_ITBL_HASH_BITS_7
|
8983 RX_MODE_RSS_IPV6_HASH_EN
|
8984 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
8985 RX_MODE_RSS_IPV4_HASH_EN
|
8986 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
8988 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8991 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
8993 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
8994 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8995 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8998 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
9001 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9002 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
9003 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
9004 /* Set drive transmission level to 1.2V */
9005 /* only if the signal pre-emphasis bit is not set */
9006 val
= tr32(MAC_SERDES_CFG
);
9009 tw32(MAC_SERDES_CFG
, val
);
9011 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
9012 tw32(MAC_SERDES_CFG
, 0x616000);
9015 /* Prevent chip from dropping frames when flow control
9018 if (tg3_flag(tp
, 57765_CLASS
))
9022 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
9024 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
9025 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
9026 /* Use hardware link auto-negotiation */
9027 tg3_flag_set(tp
, HW_AUTONEG
);
9030 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
9031 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
9034 tmp
= tr32(SERDES_RX_CTRL
);
9035 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
9036 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
9037 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
9038 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9041 if (!tg3_flag(tp
, USE_PHYLIB
)) {
9042 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
9043 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
9044 tp
->link_config
.speed
= tp
->link_config
.orig_speed
;
9045 tp
->link_config
.duplex
= tp
->link_config
.orig_duplex
;
9046 tp
->link_config
.autoneg
= tp
->link_config
.orig_autoneg
;
9049 err
= tg3_setup_phy(tp
, 0);
9053 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9054 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
9057 /* Clear CRC stats. */
9058 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
9059 tg3_writephy(tp
, MII_TG3_TEST1
,
9060 tmp
| MII_TG3_TEST1_CRC_EN
);
9061 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
9066 __tg3_set_rx_mode(tp
->dev
);
9068 /* Initialize receive rules. */
9069 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
9070 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9071 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
9072 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9074 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
))
9078 if (tg3_flag(tp
, ENABLE_ASF
))
9082 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
9084 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
9086 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
9088 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
9090 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
9092 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
9094 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
9096 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
9098 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
9100 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
9102 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
9104 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
9106 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9108 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9116 if (tg3_flag(tp
, ENABLE_APE
))
9117 /* Write our heartbeat update interval to APE. */
9118 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
9119 APE_HOST_HEARTBEAT_INT_DISABLE
);
9121 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
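
/* Editor's sketch (not part of the driver): the TG3_STAT_ADD32 carry
 * trick in isolation.  The hardware counters are only 32 bits wide and
 * wrap, so each reading is added into the low word and the high word
 * is bumped whenever the addition wraps -- the sum coming out smaller
 * than the value just added proves an unsigned overflow.  Compiled out
 * via "#if 0"; the names below are illustrative only.
 */
#if 0
struct stat64_demo { u32 low, high; };

static void stat_add32_demo(struct stat64_demo *s, u32 hw_val)
{
	s->low += hw_val;
	if (s->low < hw_val)	/* unsigned wraparound occurred */
		s->high += 1;
}
#endif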
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					continue;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
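
/* Editor's sketch (not part of the driver): the divide-down pattern
 * tg3_timer uses above.  The timer re-arms every tp->timer_offset
 * jiffies (HZ/10 on most chips), but stats fetching runs once per
 * second and the ASF heartbeat once per two seconds, by letting
 * pre-loaded down-counters reach zero and then reloading them.
 * Compiled out via "#if 0"; illustrative only.
 */
#if 0
static void fast_tick_demo(struct tg3 *tp)
{
	if (!--tp->timer_counter) {		/* once per second */
		/* per-second work goes here */
		tp->timer_counter = tp->timer_multiplier;
	}
	if (!--tp->asf_counter) {		/* once per two seconds */
		/* heartbeat work goes here */
		tp->asf_counter = tp->asf_multiplier;
	}
}
#endif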
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
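
/* Editor's sketch (not part of the driver): the three-word header
 * layout tg3_request_firmware relies on, per the comment above --
 * word 0 carries version numbers, word 1 the load address, and word 2
 * the full image length including BSS (so it may legitimately exceed
 * the file payload, never undershoot it).  Compiled out via "#if 0";
 * the struct name is illustrative only.
 */
#if 0
struct tg3_fw_hdr_demo {
	__be32 version;		/* fw_data[0] */
	__be32 base_addr;	/* fw_data[1] */
	__be32 len;		/* fw_data[2], includes BSS */
};
#endif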
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[tp->irq_max];

	tp->irq_cnt = num_online_cpus();
	if (tp->irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
	}

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
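
/* Editor's sketch (not part of the driver): the legacy
 * pci_enable_msix() retry convention used above.  In this kernel era
 * the call returned 0 on success, a negative errno on hard failure,
 * or a positive count of vectors actually available, in which case
 * the caller could retry once with that smaller count.  Compiled out
 * via "#if 0"; illustrative only.
 */
#if 0
	rc = pci_enable_msix(pdev, ent, want);
	if (rc > 0)			/* only rc vectors available */
		rc = pci_enable_msix(pdev, ent, rc);
	if (rc)
		/* fall back to MSI or legacy INTx */;
#endif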
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
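
/* Editor's note (not part of the driver): tg3_ints_init is a simple
 * capability ladder -- try MSI-X (which also sizes the rx/tx queue
 * counts), fall back to single-vector MSI, and finally to legacy INTx,
 * in which case exactly one vector (tp->pdev->irq) and one queue pair
 * are used.  Chips that advertise MSI but not tagged status are forced
 * down this last path because the non-tagged status-block protocol is
 * race prone with message-signalled interrupts.
 */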
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    !tg3_flag(tp, 57765_CLASS))
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);
	return err;
}
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)
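
/* For reference, ESTAT_ADD(rx_octets) in the function below expands to:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool stat is the pre-reset running total plus the
 * current 64-bit hardware counter.
 */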
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
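
/* Editor's sketch (not part of the driver): how the multicast hash
 * above maps a CRC-32 of the station address into the 128-bit
 * MAC_HASH_REG_{0..3} filter.  Seven bits of the inverted CRC pick a
 * bit position; the top two of those select one of the four 32-bit
 * registers and the low five select a bit within it.  Compiled out
 * via "#if 0"; illustrative only.
 */
#if 0
static void hash_set_demo(u32 mc_filter[4], u32 crc)
{
	u32 bit = ~crc & 0x7f;			/* 7 hash bits */
	u32 regidx = (bit & 0x60) >> 5;		/* which register */

	mc_filter[regidx] |= 1u << (bit & 0x1f);/* which bit of it */
}
#endif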
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
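
/* Editor's worked example (not part of the driver) for the alignment
 * bookkeeping above: with offset=1, len=2 (the case the comment
 * cites), b_offset=1 and b_count starts as 3 but is clamped to len=2.
 * The driver then reads the aligned 32-bit word at NVRAM offset 0 and
 * copies two bytes starting at byte 1 of that word, leaving len=0 so
 * the middle and tail loops do nothing.
 */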
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->irq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_IRQ_MAX_VECS_RSS)
				info->data = TG3_IRQ_MAX_VECS_RSS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}

static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}

static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else
				data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
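
/* Editor's sketch (not part of the driver): the self-boot parity rule
 * checked above.  hweight8() is the kernel popcount helper; a data
 * byte passes only when its stored parity bit makes the combined
 * data-plus-parity weight odd, which is exactly the two failure
 * branches above inverted.  Compiled out via "#if 0"; illustrative
 * only.
 */
#if 0
static int parity_ok_demo(u8 data, int parity_bit)
{
	int odd = hweight8(data) & 1;

	return odd != parity_bit;	/* total weight must end up odd */
}
#endif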
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
11144 /* Only test the commonly used registers */
11145 static int tg3_test_registers(struct tg3
*tp
)
11147 int i
, is_5705
, is_5750
;
11148 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
11152 #define TG3_FL_5705 0x1
11153 #define TG3_FL_NOT_5705 0x2
11154 #define TG3_FL_NOT_5788 0x4
11155 #define TG3_FL_NOT_5750 0x8
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
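/* The table encodes each register as a read-only mask plus a
 * read/write mask.  Writing all zeroes and then all ones while checking
 * that (val & read_mask) still equals the saved read-only bits, and
 * that (val & write_mask) is all zeroes or all ones respectively,
 * catches bits that are stuck or decoded to the wrong address.
 */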
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
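/* The all-zeroes and all-ones patterns catch stuck-at faults, while
 * the mixed pattern 0xaa55a55a alternates adjacent bits to expose
 * shorted or cross-coupled lines in the on-chip memory under test.
 */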
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
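/* Decoding the canned frame above: 0x08 0x00 is the IPv4 ethertype,
 * 0x45 starts a 20-byte IP header, 0x40 0x06 is TTL 64 / protocol TCP,
 * the 0x0a bytes are the 10.0.0.1 -> 10.0.0.2 addresses, 0x80 0x10 is
 * a 32-byte TCP header with the ACK flag, and the trailing 12 bytes
 * are a NOP/NOP/timestamp option padded with 0x11 placeholder bytes.
 */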
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
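/* tg3_run_loopback fills the transmit payload with the repeating byte
 * pattern (i & 0xff); the receive loop re-walks that sequence with the
 * running counter in 'val', so any dropped, duplicated or corrupted
 * byte in a looped-back segment fails the compare.
 */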
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
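/* data[0], data[1] and data[2] collect the MAC-, internal-PHY- and
 * external-loopback results respectively; each slot is a bitmask of
 * the TG3_*_LOOPBACK_FAILED flags defined above.
 */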
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
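/* Result slot layout used above (matching the ethtool self-test
 * strings this driver exports): data[0] nvram, data[1] link,
 * data[2] registers, data[3] memory, data[4]-data[6] the three
 * loopback classes, data[7] interrupt test.
 */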
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
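/* From user space these knobs map straight onto ethtool, e.g.
 * "ethtool -C eth0 rx-usecs 20 rx-frames 5" sets rx_coalesce_usecs
 * and rx_max_coalesced_frames for the copy block above.
 */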
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
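/* Worked example: with the magic word at offset 0, probing 0x10, 0x20,
 * 0x40, ... on a 64 KB part eventually reads offset 0x10000, which
 * wraps back to offset 0 and returns the magic value again, so cursize
 * ends up at 0x10000 (64 KB).
 */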
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
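/* Example of the double swap described above: if the 16-bit size field
 * holds 512 (0x0200), the low half of val arrives byte-swapped relative
 * to the CPU as 0x0002, and swab16() restores 0x0200, so nvram_size
 * becomes 512 * 1024 bytes.
 */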
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
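/* The swab32(be32_to_cpu(data)) above is deliberate double swapping:
 * on either CPU endianness it leaves the buffer bytes in the order the
 * SEEPROM engine expects, exactly undoing what tg3_nvram_read_be32()
 * would have done on the way in.
 */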
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
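/* Unbuffered flash parts can only be programmed a full page at a time,
 * so each iteration above is a read-modify-write cycle: read the whole
 * page into tmp, merge in the caller's bytes at page_off, erase the
 * page, then stream it back word by word with FIRST/LAST framing.
 */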
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			if ((ret = tg3_nvram_exec_cmd(tp,
				    NVRAM_CMD_WREN | NVRAM_CMD_GO |
				    NVRAM_CMD_DONE)))
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
13053 static void __devinit
tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
13057 tp
->phy_id
= TG3_PHY_ID_INVALID
;
13058 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
13060 /* Assume an onboard device and WOL capable by default. */
13061 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
13062 tg3_flag_set(tp
, WOL_CAP
);
13064 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
13065 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
13066 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
13067 tg3_flag_set(tp
, IS_NIC
);
13069 val
= tr32(VCPU_CFGSHDW
);
13070 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
13071 tg3_flag_set(tp
, ASPM_WORKAROUND
);
13072 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
13073 (val
& VCPU_CFGSHDW_WOL_MAGPKT
)) {
13074 tg3_flag_set(tp
, WOL_ENABLE
);
13075 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
13080 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
13081 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
13082 u32 nic_cfg
, led_cfg
;
13083 u32 nic_phy_id
, ver
, cfg2
= 0, cfg4
= 0, eeprom_phy_id
;
13084 int eeprom_phy_serdes
= 0;
13086 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
13087 tp
->nic_sram_data_cfg
= nic_cfg
;
13089 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
13090 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
13091 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
13092 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
13093 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
&&
13094 (ver
> 0) && (ver
< 0x100))
13095 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
13097 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
13098 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_4
, &cfg4
);
13100 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
13101 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
13102 eeprom_phy_serdes
= 1;
13104 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
13105 if (nic_phy_id
!= 0) {
13106 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
13107 u32 id2
= nic_phy_id
				      & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
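/* One-time-programmable (OTP) memory helpers follow.  A command is issued
 * by writing it to OTP_CTRL with the START bit set and then again without
 * it; completion is polled in OTP_STATUS.  The 100 polls of 10 usec below
 * bound the wait at roughly the 1 ms the comment in the loop describes.
 */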
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
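/* Note: the link_config defaults set up below advertise every mode the
 * PHY supports.  The 10_100_ONLY phy_flag drops the gigabit modes and
 * the ANY_SERDES flag replaces the twisted-pair modes with FIBRE.
 */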
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
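/* The driver-internal PHY ID below is packed from MII_PHYSID1/MII_PHYSID2
 * with the same shifts used for the eeprom-supplied ID in
 * tg3_get_eeprom_hw_cfg(), so both sources can be compared directly
 * against TG3_PHY_ID_MASK and the known-ID table.
 */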
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
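/* tg3_read_vpd() walks the PCI VPD read-only section: it locates the
 * LRDT RO tag, optionally picks up a Dell ("1028" is Dell's PCI vendor
 * ID in ASCII) vendor-specific firmware string for the ethtool fw_ver,
 * and then copies the board part number keyword.  The hardcoded part
 * numbers at the bottom cover chips whose VPD is absent or unreadable.
 */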
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
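/* The selfboot ("sb") format encodes major/minor/build in a single EDH
 * word whose offset depends on the format revision; the switch below
 * simply maps revision to offset before decoding the fields.
 */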
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
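/* Return-ring sizing: 5717-class parts with the large production ring
 * capability use the bigger 5717 return ring, legacy jumbo-capable
 * (non-5780) parts use the 5700 size, and everything else falls back
 * to the 5705 size, mirroring the RX ring-size macros near the top of
 * this file.
 */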
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
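/* tg3_get_invariants() is the probe-time workhorse: it derives the chip
 * revision, applies chipset- and stepping-specific workarounds, selects
 * the register/mailbox access methods, and pulls in the NVRAM/eeprom
 * configuration.  Some of what it decides (most importantly the PCI-X
 * target workaround) must be settled before the first fast-path MMIO
 * access.
 */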
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}
	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tg3_flag_set(tp, 5780_CLASS);
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
	/* Determine TSO capabilities */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tp->fw_needed) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;
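	/* Summary of the TSO selection above: 57765_PLUS parts use HW_TSO_3,
	 * 5755_PLUS and 5906 use HW_TSO_2, remaining 5750_PLUS parts use
	 * HW_TSO_1 (with TSO_BUG cleared on newer 5750 steppings), and
	 * older chips fall back to firmware TSO images.  5719 A0 gets no
	 * TSO at all due to a hardware bug.
	 */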
	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
			tg3_rss_init_dflt_indir_tbl(tp);
		}
	}

	if (tg3_flag(tp, 5755_PLUS))
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
			int readrq = pcie_get_readrq(tp->pdev);
			if (readrq > 2048)
				pcie_set_readrq(tp->pdev, 2048);
		}

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
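	/* At this point tp->read32/tp->write32 and the mailbox variants
	 * have been fixed up for every known access quirk: indirect
	 * config-space accesses for the PCI-X target and ICH bugs,
	 * read-back flushes for hosts that reorder posted writes, and
	 * the special 5906 mailbox methods.
	 */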
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
			tp->pci_fn = tp->pci_fn ? 1 : 0;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
		}
	}
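	/* tp->pci_fn now reflects the function number as the chip itself
	 * reports it (via PCI-X status or the CPMU status word above),
	 * which on some multi-function packages appears to differ from
	 * what PCI_FUNC() alone would give.
	 */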
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, CPMU_PRESENT);
	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);
	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE +
				    NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}
	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
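/* DMA boundary tuning: BOUNDARY_SINGLE_CACHELINE asks the chip to break
 * bursts at one cacheline, BOUNDARY_MULTI_CACHELINE at a multiple of it,
 * and a goal of 0 leaves the boundary bits alone.  The per-arch #ifdefs
 * below pick the goal based on which host bridges are known to be hurt
 * by bursts across cacheline boundaries.
 */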
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;

		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;

		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
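/* tg3_do_test_dma() hand-builds a single internal buffer descriptor in
 * NIC SRAM through the PCI memory window, kicks the read or write DMA
 * FTQ, and then polls the corresponding completion FIFO for the
 * descriptor address to confirm the transfer finished.
 */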
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size,
				     int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
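/* The DMA self-test below fills a TEST_BUFFER_SIZE (0x2000, i.e. 8 KB)
 * pattern buffer, DMAs it to NIC SRAM and back at the maximum write
 * burst, and verifies the result; on corruption it shrinks the write
 * boundary to 16 bytes, which is the known-safe setting for the
 * 5700/5701 write DMA bug.
 */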
15021 #define TEST_BUFFER_SIZE 0x2000
15023 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets
) = {
15024 { PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_UNI_N_PCI15
) },
15028 static int __devinit
tg3_test_dma(struct tg3
*tp
)
15030 dma_addr_t buf_dma
;
15031 u32
*buf
, saved_dma_rwctrl
;
15034 buf
= dma_alloc_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
,
15035 &buf_dma
, GFP_KERNEL
);
15041 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
15042 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
15044 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
15046 if (tg3_flag(tp
, 57765_PLUS
))
15049 if (tg3_flag(tp
, PCI_EXPRESS
)) {
15050 /* DMA read watermark not used on PCIE */
15051 tp
->dma_rwctrl
|= 0x00180000;
15052 } else if (!tg3_flag(tp
, PCIX_MODE
)) {
15053 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
15054 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)
15055 tp
->dma_rwctrl
|= 0x003f0000;
15057 tp
->dma_rwctrl
|= 0x003f000f;
15059 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
15060 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
15061 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
15062 u32 read_water
= 0x7;
15064 /* If the 5704 is behind the EPB bridge, we can
15065 * do the less restrictive ONE_DMA workaround for
15066 * better performance.
15068 if (tg3_flag(tp
, 40BIT_DMA_BUG
) &&
15069 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
15070 tp
->dma_rwctrl
|= 0x8000;
15071 else if (ccval
== 0x6 || ccval
== 0x7)
15072 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
15074 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
)
15076 /* Set bit 23 to enable PCIX hw bug fix */
15078 (read_water
<< DMA_RWCTRL_READ_WATER_SHIFT
) |
15079 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT
) |
15081 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
) {
15082 /* 5780 always in PCIX mode */
15083 tp
->dma_rwctrl
|= 0x00144000;
15084 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
15085 /* 5714 always in PCIX mode */
15086 tp
->dma_rwctrl
|= 0x00148000;
15088 tp
->dma_rwctrl
|= 0x001b000f;
15092 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
15093 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
15094 tp
->dma_rwctrl
&= 0xfffffff0;
15096 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
15097 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
15098 /* Remove this if it causes problems for some boards. */
15099 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
15101 /* On 5700/5701 chips, we need to set this bit.
15102 * Otherwise the chip will issue cacheline transactions
15103 * to streamable DMA memory with not all the byte
15104 * enables turned on. This is an error on several
15105 * RISC PCI controllers, in particular sparc64.
15107 * On 5703/5704 chips, this bit has been reassigned
15108 * a different meaning. In particular, it is used
15109 * on those chips to enable a PCI-X workaround.
15111 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
15114 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
15117 /* Unneeded, already done by tg3_get_invariants. */
15118 tg3_switch_clocks(tp
);
15121 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
15122 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
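	/* Test protocol: fill the buffer with a known pattern, DMA it to
	 * chip SRAM, DMA it back and verify.  On corruption, retry once
	 * with the write boundary forced to 16 bytes before giving up.
	 */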
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}
#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i * 4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}
		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
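/* Choose MAC buffer-manager memory watermarks for the chip family;
 * standard and jumbo-frame thresholds are tracked separately (jumbo
 * frames occupy more on-chip mbuf memory per packet).
 */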
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
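/* Map the hardware PHY ID to a human-readable name for the probe
 * banner printed by tg3_init_one().
 */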
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}
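/* Seed tp->coal with the driver's default interrupt-coalescing
 * parameters; userspace can override them later via ethtool.
 */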
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
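/* ndo_get_stats64 hook: read the live counters under tp->lock, or fall
 * back to the last saved snapshot when the hardware stats block is
 * unavailable.
 */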
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tp->hw_stats)
		return &tp->net_stats_prev;

	spin_lock_bh(&tp->lock);
	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
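/* PCI probe entry point: enable the device, map its BARs, read the
 * chip invariants, size the DMA masks and register the net device.
 */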
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);
	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}
	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;
	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;
	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}
	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
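	/* dma_mask governs streaming packet-buffer mappings, while
	 * persist_dma_mask governs coherent allocations (rings, status
	 * block), which stay at 40 bits even when HIGHMEM widens dma_mask.
	 */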
	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;

		/* TSO is on by default on chips that support hardware TSO.
		 * Firmware TSO on older chips gives lower performance, so it
		 * is off by default, but can be enabled using ethtool.
		 */
		if ((tg3_flag(tp, HW_TSO_1) ||
		     tg3_flag(tp, HW_TSO_2) ||
		     tg3_flag(tp, HW_TSO_3)) &&
		    (features & NETIF_F_IP_CSUM))
			features |= NETIF_F_TSO;
		if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
			if (features & NETIF_F_IPV6_CSUM)
				features |= NETIF_F_TSO6;
			if (tg3_flag(tp, HW_TSO_3) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
				features |= NETIF_F_TSO_ECN;
		}

		dev->features |= features;
		dev->vlan_features |= features;
	}
	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down;
	 * otherwise the DMA self test will enable WDMAC and we'll see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}
	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
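	/* Assign each NAPI context its interrupt, rx-return and
	 * tx-producer mailbox registers.
	 */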
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}
	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
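/* PCI remove: tear everything down in the reverse order of probe. */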
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);