/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
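/* Example (illustrative, not from the original source): tg3_flag(tp,
 * JUMBO_CAPABLE) expands to _tg3_flag(TG3_FLAG_JUMBO_CAPABLE,
 * (tp)->tg3_flags), so a misspelled flag name becomes a compile-time error
 * instead of a silent test of the wrong bit, and the inline helpers give
 * the type checking that open-coded test_bit()/set_bit() calls would not.
 */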
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		121
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"November 2, 2011"
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
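/* Example (illustrative): since TG3_TX_RING_SIZE is a power of two,
 * NEXT_TX(511) == (512 & 511) == 0, i.e. the AND above wraps the index
 * exactly like ((N) + 1) % TG3_TX_RING_SIZE but without a divide -- the
 * '& (foo - 1)' trick described in the comment before TG3_TX_RING_SIZE.
 */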
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	0
#endif
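/* Example (illustrative, not from the original source): on an architecture
 * such as x86, NET_IP_ALIGN is 0 and unaligned loads are cheap, so
 * TG3_RX_COPY_THRESH(tp) collapses to the constant 256 and
 * TG3_RX_OFFSET(tp) to 0 at compile time; architectures with a nonzero
 * NET_IP_ALIGN pay one extra tp dereference per received packet instead.
 */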
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
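/* Usage note (illustrative, not from the original source): these macros
 * assume a local variable named 'tp' is in scope at the call site.
 * tw32_f() posts the write and flushes it with a read-back, while
 * tw32_wait_f() additionally enforces a delay, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * waits 40 usec around the access, as required when clock frequencies are
 * changed (see the comment above _tw32_flush()).
 */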
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off, ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
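/* Illustrative usage (not from the original source): callers bracket APE
 * shared-resource access with the helpers above and must cope with the
 * lock not being granted within the 1 ms poll window, e.g.:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;		(lock request timed out and was revoked)
 *	...touch APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * tg3_ape_send_event() below follows exactly this pattern.
 */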
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
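/* Illustrative sketch (not from the original source): each MI transaction
 * is a single MAC_MI_COM write that packs the PHY address, register number
 * and opcode into one frame:
 *
 *	frame  = (phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK;
 *	frame |= (reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK;
 *	frame |= MI_COM_CMD_READ | MI_COM_START;
 *
 * The MAC clears MI_COM_BUSY once the shift on the MDIO wire completes,
 * and for reads the result comes back in the MI_COM_DATA_MASK bits of the
 * same register, which is what the polling loop above waits for.
 */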
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}
static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
*bp
, int mii_id
, int reg
)
1192 struct tg3
*tp
= bp
->priv
;
1195 spin_lock_bh(&tp
->lock
);
1197 if (tg3_readphy(tp
, reg
, &val
))
1200 spin_unlock_bh(&tp
->lock
);
1205 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1207 struct tg3
*tp
= bp
->priv
;
1210 spin_lock_bh(&tp
->lock
);
1212 if (tg3_writephy(tp
, reg
, val
))
1215 spin_unlock_bh(&tp
->lock
);
1220 static int tg3_mdio_reset(struct mii_bus
*bp
)
1225 static void tg3_mdio_config_5785(struct tg3
*tp
)
1228 struct phy_device
*phydev
;
1230 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1231 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1232 case PHY_ID_BCM50610
:
1233 case PHY_ID_BCM50610M
:
1234 val
= MAC_PHYCFG2_50610_LED_MODES
;
1236 case PHY_ID_BCMAC131
:
1237 val
= MAC_PHYCFG2_AC131_LED_MODES
;
1239 case PHY_ID_RTL8211C
:
1240 val
= MAC_PHYCFG2_RTL8211C_LED_MODES
;
1242 case PHY_ID_RTL8201E
:
1243 val
= MAC_PHYCFG2_RTL8201E_LED_MODES
;
1249 if (phydev
->interface
!= PHY_INTERFACE_MODE_RGMII
) {
1250 tw32(MAC_PHYCFG2
, val
);
1252 val
= tr32(MAC_PHYCFG1
);
1253 val
&= ~(MAC_PHYCFG1_RGMII_INT
|
1254 MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
);
1255 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
;
1256 tw32(MAC_PHYCFG1
, val
);
1261 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1262 val
|= MAC_PHYCFG2_EMODE_MASK_MASK
|
1263 MAC_PHYCFG2_FMODE_MASK_MASK
|
1264 MAC_PHYCFG2_GMODE_MASK_MASK
|
1265 MAC_PHYCFG2_ACT_MASK_MASK
|
1266 MAC_PHYCFG2_QUAL_MASK_MASK
|
1267 MAC_PHYCFG2_INBAND_ENABLE
;
1269 tw32(MAC_PHYCFG2
, val
);
1271 val
= tr32(MAC_PHYCFG1
);
1272 val
&= ~(MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
|
1273 MAC_PHYCFG1_RGMII_EXT_RX_DEC
| MAC_PHYCFG1_RGMII_SND_STAT_EN
);
1274 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1275 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1276 val
|= MAC_PHYCFG1_RGMII_EXT_RX_DEC
;
1277 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1278 val
|= MAC_PHYCFG1_RGMII_SND_STAT_EN
;
1280 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
|
1281 MAC_PHYCFG1_RGMII_INT
| MAC_PHYCFG1_TXC_DRV
;
1282 tw32(MAC_PHYCFG1
, val
);
1284 val
= tr32(MAC_EXT_RGMII_MODE
);
1285 val
&= ~(MAC_RGMII_MODE_RX_INT_B
|
1286 MAC_RGMII_MODE_RX_QUALITY
|
1287 MAC_RGMII_MODE_RX_ACTIVITY
|
1288 MAC_RGMII_MODE_RX_ENG_DET
|
1289 MAC_RGMII_MODE_TX_ENABLE
|
1290 MAC_RGMII_MODE_TX_LOWPWR
|
1291 MAC_RGMII_MODE_TX_RESET
);
1292 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1293 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1294 val
|= MAC_RGMII_MODE_RX_INT_B
|
1295 MAC_RGMII_MODE_RX_QUALITY
|
1296 MAC_RGMII_MODE_RX_ACTIVITY
|
1297 MAC_RGMII_MODE_RX_ENG_DET
;
1298 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1299 val
|= MAC_RGMII_MODE_TX_ENABLE
|
1300 MAC_RGMII_MODE_TX_LOWPWR
|
1301 MAC_RGMII_MODE_TX_RESET
;
1303 tw32(MAC_EXT_RGMII_MODE
, val
);
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
tg3_advert_flowctrl_1000T(u8 flow_ctrl
)
1694 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1695 miireg
= ADVERTISE_PAUSE_CAP
;
1696 else if (flow_ctrl
& FLOW_CTRL_TX
)
1697 miireg
= ADVERTISE_PAUSE_ASYM
;
1698 else if (flow_ctrl
& FLOW_CTRL_RX
)
1699 miireg
= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
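/* Resolution summary (derived from the code above): when we advertise
 * symmetric pause, any remote pause support yields FLOW_CTRL_TX | RX
 * (or RX only, if the remote is asym-only and we also advertised asym);
 * when we advertise asym-only, we get FLOW_CTRL_TX exactly when the
 * remote advertises both LPA_1000XPAUSE and LPA_1000XPAUSE_ASYM.
 */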
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}
static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
*tp
, int *resetp
)
2212 static const u32 test_pat
[4][6] = {
2213 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2214 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2215 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2216 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2220 for (chan
= 0; chan
< 4; chan
++) {
2223 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2224 (chan
* 0x2000) | 0x0200);
2225 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
2227 for (i
= 0; i
< 6; i
++)
2228 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
2231 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
2232 if (tg3_wait_macro_done(tp
)) {
2237 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2238 (chan
* 0x2000) | 0x0200);
2239 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0082);
2240 if (tg3_wait_macro_done(tp
)) {
2245 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0802);
2246 if (tg3_wait_macro_done(tp
)) {
2251 for (i
= 0; i
< 6; i
+= 2) {
2254 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
2255 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
2256 tg3_wait_macro_done(tp
)) {
2262 if (low
!= test_pat
[chan
][i
] ||
2263 high
!= test_pat
[chan
][i
+1]) {
2264 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
2265 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
2266 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
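/* The loop above exercises all four DSP channels (chan * 0x2000 selects
 * the per-channel register block) with the 6-word patterns in test_pat[],
 * then reads each word pair back through MII_TG3_DSP_RW_PORT and compares.
 * Any mismatch or macro timeout asks the caller (via *resetp) to reset the
 * PHY and retry.
 */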
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
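/* The status word holds one 4-bit message nibble per PCI function,
 * starting at bit TG3_APE_GPIO_MSG_SHIFT.  Illustrative example: if
 * function 2 reports NEED_VAUX, tg3_set_function_status() below clears
 * bits [11:8] of the shifted field and ORs in
 * TG3_GPIO_MSG_NEED_VAUX << 8.
 */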
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
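/* The lock is reference counted: nested tg3_nvram_lock() calls only bump
 * nvram_lock_cnt, and the hardware arbitration grant (NVRAM_SWARB) is
 * released only when the count returns to zero in tg3_nvram_unlock().
 */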
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
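/* Worked example of the translation above, assuming the usual Atmel
 * AT45DB0X1B geometry (264-byte pages with ATMEL_AT45DB0X1B_PAGE_POS == 9;
 * see tg3.h): logical offset 1000 is page 3, byte 208, so the physical
 * address becomes (3 << 9) + 208 = 0x6d0.  tg3_nvram_logical_addr() is
 * the inverse mapping.
 */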
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
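/* Example: if NVRAM holds the bytes de ad be ef at some offset, the four
 * bytes stored through *val match that NVRAM byte order on both LE and BE
 * hosts, i.e. the result is a bytestream rather than a host-order word.
 */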
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
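/* Blob layout assumed by the two loaders above: fw_data[0] holds version
 * information, fw_data[1] the load/start address, fw_data[2] the length,
 * and the image itself begins at fw_data[3]; hence fw_len is
 * tp->fw->size minus the 12-byte header.
 */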
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
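/* Packing example: for dev_addr 00:10:18:aa:bb:cc the registers receive
 * addr_high = 0x0010 (top two bytes) and addr_low = 0x18aabbcc (bottom
 * four), mirrored into all four MAC_ADDR slots (minus slot 1 when
 * skip_mac_1 is set); the byte sum seeds the TX backoff generator.
 */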
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= ADVERTISE_1000HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000FULL;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
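/* This helper translates ethtool ADVERTISED_* bits into MII register
 * bits: e.g. ADVERTISED_100baseT_Full becomes ADVERTISE_100FULL in
 * MII_ADVERTISE, and ADVERTISED_1000baseT_Full becomes ADVERTISE_1000FULL
 * in MII_CTRL1000, with the EEE advertisement programmed through
 * clause-45 access to the AN MMD.
 */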
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & ADVERTISE_ALL) != all_mask)
		return 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return 0;

		tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		if (tg3_ctrl != all_mask)
			return 0;
	}

	return 1;
}
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tg3_flag(tp, PAUSE_AUTONEG))
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
							      &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) +
					      PCI_EXP_LNKCTL, newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
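/* Happy-path flow of the state machine below:
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 * Each call from fiber_autoneg() drives one step: ANEG_TIMER_ENAB asks
 * the caller to keep ticking, while ANEG_DONE/ANEG_FAILED terminate.
 */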
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
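/* Reviewer's note (not part of the original source): SERDES_AN_TIMEOUT_5704S
 * above acts as a countdown, decremented once per polling interval, that
 * gives hardware autoneg time to finish before the code falls back to
 * parallel detection: the link is forced up when PCS_SYNCED is set but
 * no config code words (MAC_STATUS_RCVD_CFG) are being received.
 */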
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
					     BMCR_ANRESTART |
					     BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
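/* Reviewer's note (not part of the original source): parallel detection
 * above hinges on two PHY status bits: shadow register bit 0x10 (signal
 * detect) and expansion status bit 0x20 (receiving config code words).
 * Signal present without config words means the partner is not
 * autonegotiating, so the link is forced to 1000/full; config words
 * reappearing later means autoneg should be re-enabled.
 */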
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
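/* Reviewer's note (not part of the original source): tg3_rd32_loop()
 * first biases dst by off bytes so that the buffer slot regs[off / 4]
 * lines up with register offset off; e.g. a dump of 0x08 bytes at
 * offset 0x400 fills regs[0x100] and regs[0x101], keeping the buffer
 * index equal to the register offset divided by sizeof(u32).
 */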
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}

static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
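/* Reviewer's note (not part of the original source): worked example of
 * the index arithmetic above, assuming TG3_TX_RING_SIZE is 512 and
 * tx_pending is 511.  With tx_prod = 5 and tx_cons = 510 the ring has
 * wrapped, and (5 - 510) & 511 = 7, so 7 descriptors are outstanding
 * and 511 - 7 = 504 remain available.  The mask works because the ring
 * size is a power of two.
 */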
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
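/* Reviewer's note (not part of the original source): the smp_mb() in
 * tg3_tx() above pairs with the one in tg3_start_xmit() after
 * netif_tx_stop_queue().  One side orders "update tx_cons, then test
 * queue-stopped"; the other orders "stop queue, then re-test
 * tg3_tx_avail()", so at least one path always observes the other's
 * write and the queue cannot stay stopped forever.
 */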
static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->skb)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(ri->skb);
	ri->skb = NULL;
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, TG3_RX_OFFSET(tp));

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
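/* Reviewer's note (not part of the original source): the descriptor
 * address split above stores a 64-bit DMA address as two 32-bit words;
 * e.g. mapping 0x0000000123456789 becomes addr_hi = 0x00000001 and
 * addr_lo = 0x23456789, matching the hardware's high/low register pair
 * layout.
 */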
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
						       PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
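/* Reviewer's note (not part of the original source): worked example of
 * the copy-count logic above, assuming a standard ring of 512 entries
 * (rx_std_ring_mask = 511).  With cons_idx = 500 and prod_idx = 10 the
 * producer has wrapped, so the first pass copies 512 - 500 = 12 entries
 * up to the end of the ring, the consumer index wraps to 0, and the
 * next pass copies the remaining 10.
 */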
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}

static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}

/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream. We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled. Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
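/* Reviewer's note (not part of the original source): worked example for
 * tg3_4g_overflow_test() above: base = 0xffffff00 and len = 0x200 gives
 * base + len + 8 = 0x108 after 32-bit truncation, which is less than
 * base, so the buffer crosses a 4GB boundary and needs the workaround.
 * The base > 0xffffdcc0 pre-check cheaply skips buffers that end well
 * below the boundary.
 */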
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > TG3_TX_BD_DMA_MAX && *budget) {
			u32 frag_len = TG3_TX_BD_DMA_MAX;
			len -= TG3_TX_BD_DMA_MAX;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += TG3_TX_BD_DMA_MAX / 2;
				frag_len = TG3_TX_BD_DMA_MAX / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
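/* Reviewer's note (not part of the original source): for 4K_FIFO_LIMIT
 * parts, tg3_tx_frag_set() above splits any fragment longer than
 * TG3_TX_BD_DMA_MAX across several BDs; when the tail would shrink to 8
 * bytes or less, it shortens the current piece to half of
 * TG3_TX_BD_DMA_MAX and pushes the rest into the next BD, so no BD ever
 * trips the short-DMA erratum.
 */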
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}

/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
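/* Reviewer's note (not part of the original source): the gso_segs * 3
 * estimate above assumes each software-segmented packet needs on the
 * order of three descriptors (header plus payload fragments) in the
 * worst case; if that many slots are not free, the queue is stopped and
 * NETDEV_TX_BUSY is returned so the stack retries later.
 */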
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		if (skb_is_gso_v6(skb)) {
			hdr_len = skb_headlen(skb) - ETH_HLEN;
		} else {
			u32 ip_tcp_len;

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			hdr_len = ip_tcp_len + tcp_opt_len;

			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	/* Now loop through additional data fragments, and queue them. */
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
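/*
 * Illustrative aside, not driver code: the HW_TSO_3 branch in
 * tg3_start_xmit() above smears the TSO header length across spare
 * bits of the mss field and base_flags.  A standalone restatement of
 * that packing (the header length value is invented):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int hdr_len = 0x36;	/* 54-byte header, example only */
	unsigned int mss = 1460, base_flags = 0;

	mss |= (hdr_len & 0xc) << 12;		/* bits 2-3 -> mss 14-15 */
	if (hdr_len & 0x10)			/* bit 4 -> flag bit 4 */
		base_flags |= 0x00000010;
	base_flags |= (hdr_len & 0x3e0) << 5;	/* bits 5-9 -> flags 10-14 */

	printf("mss=0x%x base_flags=0x%x\n", mss, base_flags);
	return 0;
}
#endif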
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static u32 tg3_fix_features(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
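/*
 * Illustrative aside, not driver code: tg3_set_features() above XORs
 * the old and new feature masks so that only bits that actually
 * toggled trigger work.  Standalone sketch (the flag values are
 * invented; the real NETIF_F_* bits live in netdevice.h):
 */
#if 0
#include <stdio.h>

#define F_LOOPBACK (1u << 0)		/* invented bit positions */
#define F_TSO	   (1u << 1)

int main(void)
{
	unsigned int old = F_TSO, new = F_TSO | F_LOOPBACK;
	unsigned int changed = old ^ new;

	if (changed & F_LOOPBACK)
		printf("loopback toggled\n");	/* printed */
	if (changed & F_TSO)
		printf("tso toggled\n");	/* not printed */
	return 0;
}
#endif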
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
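/*
 * Illustrative aside, not driver code: the free loops above walk from
 * the consumer index to the producer index with i = (i + 1) & mask,
 * which wraps correctly only because the ring sizes are powers of two.
 * Standalone sketch with an invented 8-entry ring:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int mask = 7;		/* ring of 8 entries */
	unsigned int cons = 6, prod = 2;

	/* Visits 6, 7, 0, 1 and stops before 2. */
	for (unsigned int i = cons; i != prod; i = (i + 1) & mask)
		printf("%u ", i);
	printf("\n");
	return 0;
}
#endif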
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
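/*
 * Illustrative aside, not driver code: the opaque word written into
 * each descriptor above tags it with both its source ring and its
 * index, so the completion path can locate the buffer again.  Sketch
 * of the pack/unpack; the encodings below are invented, the real ones
 * are the RXD_OPAQUE_* constants in tg3.h:
 */
#if 0
#include <stdio.h>

#define OPQ_RING_STD	(1u << 30)	/* invented encodings */
#define OPQ_RING_JUMBO	(2u << 30)
#define OPQ_INDEX_MASK	0xffffu

int main(void)
{
	unsigned int opaque = OPQ_RING_STD | 137u;

	unsigned int idx  = opaque & OPQ_INDEX_MASK;
	unsigned int ring = opaque & ~OPQ_INDEX_MASK;

	printf("std ring? %d  idx=%u\n", ring == OPQ_RING_STD, idx);
	return 0;
}
#endif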
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					       sizeof(struct tg3_tx_ring_info) *
					       TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			if (tg3_flag(tp, ENABLE_RSS)) {
				tnapi->rx_rcb_prod_idx = NULL;
				break;
			}
			/* Fall through */
		case 1:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
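/*
 * Illustrative aside, not driver code: the switch above amounts to a
 * fixed map from MSI-X vector number to the status-block word that
 * carries that vector's rx return-ring producer index.  A simplified
 * restatement (the struct here is a stand-in, not the real
 * tg3_hw_status layout):
 */
#if 0
struct sblk_sketch {
	unsigned int idx0_rx_producer;	/* vector 1 */
	unsigned int rx_jumbo_consumer;	/* reused for vector 2 under RSS */
	unsigned int reserved;		/* reused for vector 3 under RSS */
	unsigned int rx_mini_consumer;	/* reused for vector 4 under RSS */
};

static unsigned int *rcb_prod_idx(struct sblk_sketch *s, int vec)
{
	switch (vec) {
	case 1: return &s->idx0_rx_producer;
	case 2: return &s->rx_jumbo_consumer;
	case 3: return &s->reserved;
	case 4: return &s->rx_mini_consumer;
	default: return 0;	/* vector 0 handles no rx under RSS */
	}
}
#endif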
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
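/*
 * Illustrative aside, not driver code: tg3_stop_block() above is the
 * usual bounded-poll idiom: clear the enable bit, then spin a fixed
 * number of times waiting for the hardware to acknowledge.  Standalone
 * sketch with a fake register that "clears" on the third read
 * (reg_read() stands in for tr32()):
 */
#if 0
#include <stdio.h>

static unsigned int reads;
static unsigned int reg_read(void)
{
	return ++reads < 3 ? 0x1 : 0x0;
}

int main(void)
{
	int i, max_wait = 1000;

	for (i = 0; i < max_wait; i++) {
		if ((reg_read() & 0x1) == 0)
			break;
	}
	if (i == max_wait)
		printf("timed out\n");
	else
		printf("stopped after %d polls\n", i);
	return 0;
}
#endif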
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
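/*
 * Illustrative aside, not driver code: the write_op juggling in
 * tg3_chip_reset() above temporarily swaps a write-then-flush register
 * accessor for a plain write, because the flushing read-back would
 * wedge the chip mid-reset.  Minimal restatement of the pattern with
 * stand-in types (these are not the driver's real accessors):
 */
#if 0
struct dev_sketch {
	void (*write32)(unsigned int off, unsigned int val);
};

static void write_plain(unsigned int off, unsigned int val)
{
	(void)off; (void)val;	/* would poke the register */
}

static void write_flush(unsigned int off, unsigned int val)
{
	(void)off; (void)val;	/* would poke, then read back to flush */
}

static void reset_sketch(struct dev_sketch *d)
{
	void (*saved)(unsigned int, unsigned int) = d->write32;

	if (d->write32 == write_flush)
		d->write32 = write_plain;	/* no read-backs during reset */

	d->write32(0x0 /* reset register, invented */, 1u);

	d->write32 = saved;			/* restore the workaround */
}
#endif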
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		       maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
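/*
 * Illustrative aside, not driver code: tg3_set_bdinfo() above splits a
 * 64-bit DMA address into the two 32-bit halves the chip expects.
 * Standalone sketch with an invented address:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mapping = 0x123456789abcULL;	/* invented bus address */

	uint32_t hi = (uint32_t)(mapping >> 32);
	uint32_t lo = (uint32_t)(mapping & 0xffffffff);

	/* hi=0x1234 lo=0x56789abc */
	printf("hi=0x%x lo=0x%x\n", (unsigned)hi, (unsigned)lo);
	return 0;
}
#endif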
static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
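/*
 * Illustrative aside, not driver code: the per-vector coalescing
 * registers above are laid out at a fixed 0x18-byte stride from the
 * *_VEC1 base, hence the reg = BASE + i * 0x18 arithmetic.  Standalone
 * sketch (the base offset is invented; the stride matches the code):
 */
#if 0
#include <stdio.h>

#define RXCOL_TICKS_VEC1 0x100		/* invented base offset */
#define VEC_STRIDE	 0x18

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("vec %d ticks reg @ 0x%x\n",
		       i + 1, RXCOL_TICKS_VEC1 + i * VEC_STRIDE);
	return 0;
}
#endif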
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
	else
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
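/*
 * Illustrative aside, not driver code: the replenish threshold above
 * is the smaller of what the NIC-side BD cache can absorb and an
 * eighth of the host ring, clamped to at least one buffer.  Standalone
 * restatement with invented numbers:
 */
#if 0
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int bdcache_maxcnt = 64, rx_std_max_post = 8;
	unsigned int rx_pending = 200;	/* all values invented */

	unsigned int nic_rep  = min_u(bdcache_maxcnt / 2, rx_std_max_post);
	unsigned int host_rep = max_u(rx_pending / 8, 1);

	printf("threshold=%u\n", min_u(nic_rep, host_rep));	/* 8 */
	return 0;
}
#endif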
8265 /* tp->lock is held. */
8266 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
8268 u32 val
, rdmac_mode
;
8270 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
8272 tg3_disable_ints(tp
);
8276 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
8278 if (tg3_flag(tp
, INIT_COMPLETE
))
8279 tg3_abort_hw(tp
, 1);
8281 /* Enable MAC control of LPI */
8282 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) {
8283 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL
,
8284 TG3_CPMU_EEE_LNKIDL_PCIE_NL0
|
8285 TG3_CPMU_EEE_LNKIDL_UART_IDL
);
8287 tw32_f(TG3_CPMU_EEE_CTRL
,
8288 TG3_CPMU_EEE_CTRL_EXIT_20_1_US
);
8290 val
= TG3_CPMU_EEEMD_ERLY_L1_XIT_DET
|
8291 TG3_CPMU_EEEMD_LPI_IN_TX
|
8292 TG3_CPMU_EEEMD_LPI_IN_RX
|
8293 TG3_CPMU_EEEMD_EEE_ENABLE
;
8295 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
)
8296 val
|= TG3_CPMU_EEEMD_SND_IDX_DET_EN
;
8298 if (tg3_flag(tp
, ENABLE_APE
))
8299 val
|= TG3_CPMU_EEEMD_APE_TX_DET_EN
;
8301 tw32_f(TG3_CPMU_EEE_MODE
, val
);
8303 tw32_f(TG3_CPMU_EEE_DBTMR1
,
8304 TG3_CPMU_DBTMR1_PCIEXIT_2047US
|
8305 TG3_CPMU_DBTMR1_LNKIDLE_2047US
);
8307 tw32_f(TG3_CPMU_EEE_DBTMR2
,
8308 TG3_CPMU_DBTMR2_APE_TX_2047US
|
8309 TG3_CPMU_DBTMR2_TXIDXEQ_2047US
);
8315 err
= tg3_chip_reset(tp
);
8319 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
8321 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
) {
8322 val
= tr32(TG3_CPMU_CTRL
);
8323 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
8324 tw32(TG3_CPMU_CTRL
, val
);
8326 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
8327 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
8328 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
8329 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
8331 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
8332 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
8333 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
8334 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
8336 val
= tr32(TG3_CPMU_HST_ACC
);
8337 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
8338 val
|= CPMU_HST_ACC_MACCLK_6_25
;
8339 tw32(TG3_CPMU_HST_ACC
, val
);
8342 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
8343 val
= tr32(PCIE_PWR_MGMT_THRESH
) & ~PCIE_PWR_MGMT_L1_THRESH_MSK
;
8344 val
|= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN
|
8345 PCIE_PWR_MGMT_L1_THRESH_4MS
;
8346 tw32(PCIE_PWR_MGMT_THRESH
, val
);
8348 val
= tr32(TG3_PCIE_EIDLE_DELAY
) & ~TG3_PCIE_EIDLE_DELAY_MASK
;
8349 tw32(TG3_PCIE_EIDLE_DELAY
, val
| TG3_PCIE_EIDLE_DELAY_13_CLKS
);
8351 tw32(TG3_CORR_ERR_STAT
, TG3_CORR_ERR_STAT_CLEAR
);
8353 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
8354 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
8357 if (tg3_flag(tp
, L1PLLPD_EN
)) {
8358 u32 grc_mode
= tr32(GRC_MODE
);
8360 /* Access the lower 1K of PL PCIE block registers. */
8361 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
8362 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
8364 val
= tr32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
);
8365 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
,
8366 val
| TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN
);
8368 tw32(GRC_MODE
, grc_mode
);
8371 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
) {
8372 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
) {
8373 u32 grc_mode
= tr32(GRC_MODE
);
8375 /* Access the lower 1K of PL PCIE block registers. */
8376 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
8377 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
8379 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
8380 TG3_PCIE_PL_LO_PHYCTL5
);
8381 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL5
,
8382 val
| TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ
);
8384 tw32(GRC_MODE
, grc_mode
);
8387 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_57765_AX
) {
8388 u32 grc_mode
= tr32(GRC_MODE
);
8390 /* Access the lower 1K of DL PCIE block registers. */
8391 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
8392 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_DL_SEL
);
8394 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
8395 TG3_PCIE_DL_LO_FTSMAX
);
8396 val
&= ~TG3_PCIE_DL_LO_FTSMAX_MSK
;
8397 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_DL_LO_FTSMAX
,
8398 val
| TG3_PCIE_DL_LO_FTSMAX_VAL
);
8400 tw32(GRC_MODE
, grc_mode
);
8403 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
8404 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
8405 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
8406 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
8409 /* This works around an issue with Athlon chipsets on
8410 * B3 tigon3 silicon. This bit has no effect on any
8411 * other revision. But do not set this on PCI Express
8412 * chips and don't even touch the clocks if the CPMU is present.
8414 if (!tg3_flag(tp
, CPMU_PRESENT
)) {
8415 if (!tg3_flag(tp
, PCI_EXPRESS
))
8416 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
8417 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
8420 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
8421 tg3_flag(tp
, PCIX_MODE
)) {
8422 val
= tr32(TG3PCI_PCISTATE
);
8423 val
|= PCISTATE_RETRY_SAME_DMA
;
8424 tw32(TG3PCI_PCISTATE
, val
);
8427 if (tg3_flag(tp
, ENABLE_APE
)) {
8428 /* Allow reads and writes to the
8429 * APE register and memory space.
8431 val
= tr32(TG3PCI_PCISTATE
);
8432 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
8433 PCISTATE_ALLOW_APE_SHMEM_WR
|
8434 PCISTATE_ALLOW_APE_PSPACE_WR
;
8435 tw32(TG3PCI_PCISTATE
, val
);
8438 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_BX
) {
8439 /* Enable some hw fixes. */
8440 val
= tr32(TG3PCI_MSI_DATA
);
8441 val
|= (1 << 26) | (1 << 28) | (1 << 29);
8442 tw32(TG3PCI_MSI_DATA
, val
);
8445 /* Descriptor ring init may make accesses to the
8446 * NIC SRAM area to setup the TX descriptors, so we
8447 * can only do this after the hardware has been
8448 * successfully reset.
8450 err
= tg3_init_rings(tp
);
8454 if (tg3_flag(tp
, 57765_PLUS
)) {
8455 val
= tr32(TG3PCI_DMA_RW_CTRL
) &
8456 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
8457 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
)
8458 val
&= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK
;
8459 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_57765
&&
8460 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
)
8461 val
|= DMA_RWCTRL_TAGGED_STAT_WA
;
8462 tw32(TG3PCI_DMA_RW_CTRL
, val
| tp
->dma_rwctrl
);
8463 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
&&
8464 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5761
) {
8465 /* This value is determined during the probe time DMA
8466 * engine test, tg3_test_dma.
8468 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
8471 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
8472 GRC_MODE_4X_NIC_SEND_RINGS
|
8473 GRC_MODE_NO_TX_PHDR_CSUM
|
8474 GRC_MODE_NO_RX_PHDR_CSUM
);
8475 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
8477 /* Pseudo-header checksum is done by hardware logic and not
8478 * the offload processers, so make the chip do the pseudo-
8479 * header checksums on receive. For transmit it is more
8480 * convenient to do the pseudo-header checksum in software
8481 * as Linux does that on transmit for us in all cases.
8483 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
8487 (GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
));
8489 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8490 val
= tr32(GRC_MISC_CFG
);
8492 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
8493 tw32(GRC_MISC_CFG
, val
);
8495 /* Initialize MBUF/DESC pool. */
8496 if (tg3_flag(tp
, 5750_PLUS
)) {
8498 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
) {
8499 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
8500 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
8501 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
8503 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
8504 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
8505 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
8506 } else if (tg3_flag(tp
, TSO_CAPABLE
)) {
8509 fw_len
= tp
->fw_len
;
8510 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
8511 tw32(BUFMGR_MB_POOL_ADDR
,
8512 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
8513 tw32(BUFMGR_MB_POOL_SIZE
,
8514 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
8517 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
8518 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
8519 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
8520 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
8521 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
8522 tw32(BUFMGR_MB_HIGH_WATER
,
8523 tp
->bufmgr_config
.mbuf_high_water
);
8525 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
8526 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
8527 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
8528 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
8529 tw32(BUFMGR_MB_HIGH_WATER
,
8530 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
8532 tw32(BUFMGR_DMA_LOW_WATER
,
8533 tp
->bufmgr_config
.dma_low_water
);
8534 tw32(BUFMGR_DMA_HIGH_WATER
,
8535 tp
->bufmgr_config
.dma_high_water
);
8537 val
= BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
;
8538 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
8539 val
|= BUFMGR_MODE_NO_TX_UNDERRUN
;
8540 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
8541 tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
8542 tp
->pci_chip_rev_id
== CHIPREV_ID_5720_A0
)
8543 val
|= BUFMGR_MODE_MBLOW_ATTN_ENAB
;
8544 tw32(BUFMGR_MODE
, val
);
8545 for (i
= 0; i
< 2000; i
++) {
8546 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
8551 netdev_err(tp
->dev
, "%s cannot enable BUFMGR\n", __func__
);
8555 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5906_A1
)
8556 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
8558 tg3_setup_rxbd_thresholds(tp
);
8560 /* Initialize TG3_BDINFO's at:
8561 * RCVDBDI_STD_BD: standard eth size rx ring
8562 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8563 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8566 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8567 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8568 * ring attribute flags
8569 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8571 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8572 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8574 * The size of each ring is fixed in the firmware, but the location is
8577 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8578 ((u64
) tpr
->rx_std_mapping
>> 32));
8579 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8580 ((u64
) tpr
->rx_std_mapping
& 0xffffffff));
8581 if (!tg3_flag(tp
, 5717_PLUS
))
8582 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
8583 NIC_SRAM_RX_BUFFER_DESC
);
8585 /* Disable the mini ring */
8586 if (!tg3_flag(tp
, 5705_PLUS
))
8587 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8588 BDINFO_FLAGS_DISABLED
);
8590 /* Program the jumbo buffer descriptor ring control
8591 * blocks on those devices that have them.
8593 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
8594 (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))) {
8596 if (tg3_flag(tp
, JUMBO_RING_ENABLE
)) {
8597 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8598 ((u64
) tpr
->rx_jmb_mapping
>> 32));
8599 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8600 ((u64
) tpr
->rx_jmb_mapping
& 0xffffffff));
8601 val
= TG3_RX_JMB_RING_SIZE(tp
) <<
8602 BDINFO_FLAGS_MAXLEN_SHIFT
;
8603 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8604 val
| BDINFO_FLAGS_USE_EXT_RECV
);
8605 if (!tg3_flag(tp
, USE_JUMBO_BDFLAG
) ||
8606 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8607 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
8608 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
8610 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8611 BDINFO_FLAGS_DISABLED
);
8614 if (tg3_flag(tp
, 57765_PLUS
)) {
8615 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8616 val
= TG3_RX_STD_MAX_SIZE_5700
;
8618 val
= TG3_RX_STD_MAX_SIZE_5717
;
8619 val
<<= BDINFO_FLAGS_MAXLEN_SHIFT
;
8620 val
|= (TG3_RX_STD_DMA_SZ
<< 2);
8622 val
= TG3_RX_STD_DMA_SZ
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
8624 val
= TG3_RX_STD_MAX_SIZE_5700
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
8626 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
, val
);
8628 tpr
->rx_std_prod_idx
= tp
->rx_pending
;
8629 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
, tpr
->rx_std_prod_idx
);
8631 tpr
->rx_jmb_prod_idx
=
8632 tg3_flag(tp
, JUMBO_RING_ENABLE
) ? tp
->rx_jumbo_pending
: 0;
8633 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
, tpr
->rx_jmb_prod_idx
);
8635 tg3_rings_reset(tp
);
8637 /* Initialize MAC address and backoff seed. */
8638 __tg3_set_mac_addr(tp
, 0);
8640 /* MTU + ethernet header + FCS + optional VLAN tag */
8641 tw32(MAC_RX_MTU_SIZE
,
8642 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
8644 /* The slot time is changed by tg3_setup_phy if we
8645 * run at gigabit with half duplex.
8647 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
8648 (6 << TX_LENGTHS_IPG_SHIFT
) |
8649 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
);
8651 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
8652 val
|= tr32(MAC_TX_LENGTHS
) &
8653 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
8654 TX_LENGTHS_CNT_DWN_VAL_MSK
);
8656 tw32(MAC_TX_LENGTHS
, val
);
8658 /* Receive rules. */
8659 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
8660 tw32(RCVLPC_CONFIG
, 0x0181);
8662 /* Calculate RDMAC_MODE setting early, we need it to determine
8663 * the RCVLPC_STATE_ENABLE mask.
8665 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
8666 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
8667 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
8668 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
8669 RDMAC_MODE_LNGREAD_ENAB
);
8671 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
)
8672 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
8674 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
8675 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8676 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
8677 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
8678 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
8679 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
8681 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
8682 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
8683 if (tg3_flag(tp
, TSO_CAPABLE
) &&
8684 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
8685 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
8686 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
8687 !tg3_flag(tp
, IS_5788
)) {
8688 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
8692 if (tg3_flag(tp
, PCI_EXPRESS
))
8693 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
8695 if (tg3_flag(tp
, HW_TSO_1
) ||
8696 tg3_flag(tp
, HW_TSO_2
) ||
8697 tg3_flag(tp
, HW_TSO_3
))
8698 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
8700 if (tg3_flag(tp
, 57765_PLUS
) ||
8701 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8702 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
8703 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
8705 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
8706 rdmac_mode
|= tr32(RDMAC_MODE
) & RDMAC_MODE_H2BNC_VLAN_DET
;
8708 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
8709 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
8710 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8711 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
8712 tg3_flag(tp
, 57765_PLUS
)) {
8713 val
= tr32(TG3_RDMA_RSRVCTRL_REG
);
8714 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
8715 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8716 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
8717 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
8718 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
8719 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
8720 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
8721 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
8723 tw32(TG3_RDMA_RSRVCTRL_REG
,
8724 val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
8727 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
8728 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8729 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
8730 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
|
8731 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
8732 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
8735 /* Receive/send statistics. */
8736 if (tg3_flag(tp
, 5750_PLUS
)) {
8737 val
= tr32(RCVLPC_STATS_ENABLE
);
8738 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR |
	       MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);
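	/* The block below programs the RSS indirection table: 128 hash
	 * buckets, four bits per entry, eight entries per 32-bit register.
	 * With irq_cnt == 2 (one vector for link events plus a single rx
	 * ring) every bucket maps to ring 0; otherwise buckets are dealt
	 * round-robin across the irq_cnt - 1 rx rings.
	 */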
	if (tg3_flag(tp, ENABLE_RSS)) {
		int i = 0;
		u32 reg = MAC_RSS_INDIR_TBL_0;

		if (tp->irq_cnt == 2) {
			for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
				tw32(reg, 0x0);
				reg += 4;
			}
		} else {
			u32 val;

			while (i < TG3_RSS_INDIR_TBL_SIZE) {
				val = i % (tp->irq_cnt - 1);
				i++;
				for (; i % 8; i++) {
					val <<= 4;
					val |= (i % (tp->irq_cnt - 1));
				}
				tw32(reg, val);
				reg += 4;
			}
		}

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.speed = tp->link_config.orig_speed;
			tp->link_config.duplex = tp->link_config.orig_duplex;
			tp->link_config.autoneg = tp->link_config.orig_autoneg;
		}

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0);  tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0);  tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0);  tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0);  tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0);  tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0);  tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
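/* The hardware statistics counters are free-running 32-bit values, so
 * TG3_STAT_ADD32() folds each reading into a 64-bit high/low software
 * accumulator.  The unsigned add to ->low wraps modulo 2^32, and the sum
 * is smaller than the addend exactly when that happens (e.g. low
 * 0xffffff00 plus 0x200 gives 0x100, which is < 0x200), in which case
 * ->high takes the carry.
 */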
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
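/* Layout note (inferred from the header reads in tg3_request_firmware()
 * below, not from a firmware spec): the blob starts with a 12-byte
 * header of three 32-bit words -- version information, start address,
 * and the full image length including BSS -- which is why the sanity
 * check compares the declared length against fw->size - 12.
 */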
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
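/* Vector budget used by tg3_enable_msix() below: vector 0 services link
 * and error events only, and each additional vector drives one rx ring,
 * so the driver requests num_online_cpus() + 1 vectors (capped at
 * tp->irq_max) to end up with roughly one rx ring per CPU.
 */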
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);
	return err;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
						 struct rtnl_link_stats64 *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
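/* The status block keeps each 64-bit statistic as a pair of 32-bit
 * words (tg3_stat64_t); get_stat64() below simply reassembles the two
 * halves into a u64.
 */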
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
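/* Each ethtool counter reported to userspace is the snapshot saved at
 * the last tg3_close() (estats_prev) plus whatever the hardware block
 * has accumulated since the most recent chip reset, so the totals keep
 * growing across down/up cycles even though a reset clears the
 * hardware statistics.
 */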
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;

	return stats;
}
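/* calc_crc() below is a bit-serial CRC-32 using the reflected IEEE
 * 802.3 polynomial (0xedb88320), i.e. the same CRC the MAC computes
 * over a frame; the multicast hash filter is indexed from this value,
 * so software has to reproduce the hardware's arithmetic exactly.
 */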
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
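/* Imperfect multicast filtering uses a 128-bit hash spread over the
 * four MAC_HASH_REG registers: the low seven bits of the complemented
 * CRC of each address pick one bit, with bits 6:5 selecting the
 * register and bits 4:0 the bit within it (see the loop below).
 */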
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
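/* NVRAM is only addressable as aligned 32-bit words, so tg3_get_eeprom()
 * below splits a byte-granular request into three phases: a leading
 * partial word, aligned whole words, and a trailing partial word.  For
 * example, offset=1 len=2 reads the word at offset 0 and copies out its
 * middle two bytes.
 */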
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
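/* tg3_vpd_readblock() below finds the VPD data in one of three places:
 * an extended-VPD block referenced from the NVRAM directory (when the
 * part uses the TG3_EEPROM_MAGIC layout and such a directory entry
 * exists), the fixed TG3_NVM_VPD_OFF window otherwise, or, for parts
 * without that NVRAM layout, the PCI VPD capability via pci_read_vpd().
 */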
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c
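/* Judging from the unpacking loop in tg3_test_nvram() below, selfboot
 * "HW" images protect their data bytes with one parity bit each, packed
 * into a few dedicated bytes (offsets 0, 8 and 16 are visibly
 * special-cased), which is why NVRAM_SELFBOOT_DATA_SIZE is smaller than
 * NVRAM_SELFBOOT_HW_SIZE.  The test recomputes hweight8() of each data
 * byte and requires the stored parity bit to complement its low bit,
 * i.e. odd parity over data plus parity bit.
 */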
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			int l;
			u8 msk;

			if ((i == 0) || (i == 8)) {
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 24) {
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else {
				data[j++] = buf8[i];
			}
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
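/* The register self-test below drives each entry of reg_tbl[]: read_mask
 * marks bits expected to be read-only and write_mask the bits that must
 * latch writes; the TG3_FL_* flags restrict entries to the chip families
 * whose register maps actually implement them.
 */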
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
11227 { BUFMGR_MB_HIGH_WATER
, 0x0000,
11228 0x00000000, 0x000001ff },
11229 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
11230 0xffffffff, 0x00000000 },
11231 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
11232 0xffffffff, 0x00000000 },
11234 /* Mailbox Registers */
11235 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
11236 0x00000000, 0x000001ff },
11237 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
11238 0x00000000, 0x000001ff },
11239 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
11240 0x00000000, 0x000007ff },
11241 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
11242 0x00000000, 0x000001ff },
11244 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
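    /* The scan below walks reg_tbl[] until it reaches the 0xffff sentinel
     * offset; for each entry selected by the flags it verifies that
     * read-only bits survive writes and that read/write bits accept both
     * all-zeros and all-ones values.
     */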
    is_5705 = is_5750 = 0;
    if (tg3_flag(tp, 5705_PLUS)) {
        is_5705 = 1;
        if (tg3_flag(tp, 5750_PLUS))
            is_5750 = 1;
    }

    for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
        if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
            continue;

        if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
            continue;

        if (tg3_flag(tp, IS_5788) &&
            (reg_tbl[i].flags & TG3_FL_NOT_5788))
            continue;

        if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
            continue;

        offset = (u32) reg_tbl[i].offset;
        read_mask = reg_tbl[i].read_mask;
        write_mask = reg_tbl[i].write_mask;

        /* Save the original register content */
        save_val = tr32(offset);

        /* Determine the read-only value. */
        read_val = save_val & read_mask;

        /* Write zero to the register, then make sure the read-only bits
         * are not changed and the read/write bits are all zeros.
         */
        tw32(offset, 0);

        val = tr32(offset);

        /* Test the read-only and read/write bits. */
        if (((val & read_mask) != read_val) || (val & write_mask))
            goto out;

        /* Write ones to all the bits defined by RdMask and WrMask, then
         * make sure the read-only bits are not changed and the
         * read/write bits are all ones.
         */
        tw32(offset, read_mask | write_mask);

        val = tr32(offset);

        /* Test the read-only bits. */
        if ((val & read_mask) != read_val)
            goto out;

        /* Test the read/write bits. */
        if ((val & write_mask) != write_mask)
            goto out;

        tw32(offset, save_val);
    }

    return 0;

out:
    if (netif_msg_hw(tp))
        netdev_err(tp->dev,
                   "Register test failed at offset %x\n", offset);
    tw32(offset, save_val);
    return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
    static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
    int i;
    u32 j;
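    /* The patterns cover the usual stuck-bit cases: all zeros, all ones,
     * and a mixed 0xaa55a55a value that exercises adjacent-bit coupling.
     */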
    for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
        for (j = 0; j < len; j += 4) {
            u32 val;

            tg3_write_mem(tp, offset + j, test_pattern[i]);
            tg3_read_mem(tp, offset + j, &val);
            if (val != test_pattern[i])
                return -EIO;
        }
    }

    return 0;
}
static int tg3_test_memory(struct tg3 *tp)
{
    static struct mem_entry {
        u32 offset;
        u32 len;
    } mem_tbl_570x[] = {
        { 0x00000000, 0x00b50},
        { 0x00002000, 0x1c000},
        { 0xffffffff, 0x00000}
    }, mem_tbl_5705[] = {
        { 0x00000100, 0x0000c},
        { 0x00000200, 0x00008},
        { 0x00004000, 0x00800},
        { 0x00006000, 0x01000},
        { 0x00008000, 0x02000},
        { 0x00010000, 0x0e000},
        { 0xffffffff, 0x00000}
    }, mem_tbl_5755[] = {
        { 0x00000200, 0x00008},
        { 0x00004000, 0x00800},
        { 0x00006000, 0x00800},
        { 0x00008000, 0x02000},
        { 0x00010000, 0x0c000},
        { 0xffffffff, 0x00000}
    }, mem_tbl_5906[] = {
        { 0x00000200, 0x00008},
        { 0x00004000, 0x00400},
        { 0x00006000, 0x00400},
        { 0x00008000, 0x01000},
        { 0x00010000, 0x01000},
        { 0xffffffff, 0x00000}
    }, mem_tbl_5717[] = {
        { 0x00000200, 0x00008},
        { 0x00010000, 0x0a000},
        { 0x00020000, 0x13c00},
        { 0xffffffff, 0x00000}
    }, mem_tbl_57765[] = {
        { 0x00000200, 0x00008},
        { 0x00004000, 0x00800},
        { 0x00006000, 0x09800},
        { 0x00010000, 0x0a000},
        { 0xffffffff, 0x00000}
    };
    struct mem_entry *mem_tbl;
    int err = 0;
    int i;

    if (tg3_flag(tp, 5717_PLUS))
        mem_tbl = mem_tbl_5717;
    else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
        mem_tbl = mem_tbl_57765;
    else if (tg3_flag(tp, 5755_PLUS))
        mem_tbl = mem_tbl_5755;
    else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
        mem_tbl = mem_tbl_5906;
    else if (tg3_flag(tp, 5705_PLUS))
        mem_tbl = mem_tbl_5705;
    else
        mem_tbl = mem_tbl_570x;

    for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
        err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
        if (err)
            break;
    }

    return err;
}
#define TG3_TSO_MSS         500

#define TG3_TSO_IP_HDR_LEN  20
#define TG3_TSO_TCP_HDR_LEN 20
#define TG3_TSO_TCP_OPT_LEN 12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
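/* The canned header above is, in order: a 2-byte Ethernet type (0x0800,
 * IPv4), a 20-byte IPv4 header (0x45 ...), and a TCP header whose option
 * bytes (0x01 0x01 0x08 0x0a ...) carry a 12-byte timestamp option,
 * matching the TG3_TSO_*_LEN constants.
 */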
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
    u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
    u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
    u32 budget;
    struct sk_buff *skb, *rx_skb;
    u8 *tx_data;
    dma_addr_t map;
    int num_pkts, tx_len, rx_len, i, err;
    struct tg3_rx_buffer_desc *desc;
    struct tg3_napi *tnapi, *rnapi;
    struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

    tnapi = &tp->napi[0];
    rnapi = &tp->napi[0];
    if (tp->irq_cnt > 1) {
        if (tg3_flag(tp, ENABLE_RSS))
            rnapi = &tp->napi[1];
        if (tg3_flag(tp, ENABLE_TSS))
            tnapi = &tp->napi[1];
    }
    coal_now = tnapi->coal_now | rnapi->coal_now;

    err = -EIO;

    tx_len = pktsz;
    skb = netdev_alloc_skb(tp->dev, tx_len);
    if (!skb)
        return -ENOMEM;

    tx_data = skb_put(skb, tx_len);
    memcpy(tx_data, tp->dev->dev_addr, 6);
    memset(tx_data + 6, 0x0, 8);

    tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

    if (tso_loopback) {
        struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

        u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
                      TG3_TSO_TCP_OPT_LEN;

        memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
               sizeof(tg3_tso_header));
        mss = TG3_TSO_MSS;

        val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
        num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

        /* Set the total length field in the IP header */
        iph->tot_len = htons((u16)(mss + hdr_len));

        base_flags = (TXD_FLAG_CPU_PRE_DMA |
                      TXD_FLAG_CPU_POST_DMA);

        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3)) {
            struct tcphdr *th;
            val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
            th = (struct tcphdr *)&tx_data[val];
            th->check = 0;
        } else
            base_flags |= TXD_FLAG_TCPUDP_CSUM;

        if (tg3_flag(tp, HW_TSO_3)) {
            mss |= (hdr_len & 0xc) << 12;
            if (hdr_len & 0x10)
                base_flags |= 0x00000010;
            base_flags |= (hdr_len & 0x3e0) << 5;
        } else if (tg3_flag(tp, HW_TSO_2))
            mss |= hdr_len << 9;
        else if (tg3_flag(tp, HW_TSO_1) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
            mss |= (TG3_TSO_TCP_OPT_LEN << 9);
        } else {
            base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
        }

        data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
    } else {
        num_pkts = 1;
        data_off = ETH_HLEN;
    }

    for (i = data_off; i < tx_len; i++)
        tx_data[i] = (u8) (i & 0xff);

    map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
    if (pci_dma_mapping_error(tp->pdev, map)) {
        dev_kfree_skb(skb);
        return -EIO;
    }

    val = tnapi->tx_prod;
    tnapi->tx_buffers[val].skb = skb;
    dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

    tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
           rnapi->coal_now);

    udelay(10);

    rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

    budget = tg3_tx_avail(tnapi);
    if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
                        base_flags | TXD_FLAG_END, mss, 0)) {
        tnapi->tx_buffers[val].skb = NULL;
        dev_kfree_skb(skb);
        return -EIO;
    }

    tnapi->tx_prod++;

    tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
    tr32_mailbox(tnapi->prodmbox);

    udelay(10);

    /* 350 usec to allow enough time on some 10/100 Mbps devices. */
    for (i = 0; i < 35; i++) {
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               coal_now);

        udelay(10);

        tx_idx = tnapi->hw_status->idx[0].tx_consumer;
        rx_idx = rnapi->hw_status->idx[0].rx_producer;
        if ((tx_idx == tnapi->tx_prod) &&
            (rx_idx == (rx_start_idx + num_pkts)))
            break;
    }

    tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
    dev_kfree_skb(skb);

    if (tx_idx != tnapi->tx_prod)
        goto out;

    if (rx_idx != rx_start_idx + num_pkts)
        goto out;

    val = data_off;
    while (rx_idx != rx_start_idx) {
        desc = &rnapi->rx_rcb[rx_start_idx++];
        desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
        opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

        if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
            (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
            goto out;

        rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
                 - ETH_FCS_LEN;

        if (!tso_loopback) {
            if (rx_len != tx_len)
                goto out;

            if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
                if (opaque_key != RXD_OPAQUE_RING_STD)
                    goto out;
            } else {
                if (opaque_key != RXD_OPAQUE_RING_JUMBO)
                    goto out;
            }
        } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                    >> RXD_TCPCSUM_SHIFT != 0xffff) {
            goto out;
        }

        if (opaque_key == RXD_OPAQUE_RING_STD) {
            rx_skb = tpr->rx_std_buffers[desc_idx].skb;
            map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
                                 mapping);
        } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
            rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
            map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
                                 mapping);
        } else
            goto out;

        pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
                                    PCI_DMA_FROMDEVICE);

        for (i = data_off; i < rx_len; i++, val++) {
            if (*(rx_skb->data + i) != (u8) (val & 0xff))
                goto out;
        }
    }

    err = 0;

    /* tg3_free_rings will unmap and free the rx_skb */
out:
    return err;
}
#define TG3_STD_LOOPBACK_FAILED 1
#define TG3_JMB_LOOPBACK_FAILED 2
#define TG3_TSO_LOOPBACK_FAILED 4
#define TG3_LOOPBACK_FAILED \
    (TG3_STD_LOOPBACK_FAILED | \
     TG3_JMB_LOOPBACK_FAILED | \
     TG3_TSO_LOOPBACK_FAILED)
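/* Loopback results are reported per mode: data[0] collects MAC-loopback
 * failures, data[1] internal PHY loopback, and data[2] external PHY
 * loopback, each ORing together the STD/JMB/TSO bits above.
 */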
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
    int err = -EIO;
    u32 eee_cap;

    eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
    tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

    if (!netif_running(tp->dev)) {
        data[0] = TG3_LOOPBACK_FAILED;
        data[1] = TG3_LOOPBACK_FAILED;
        if (do_extlpbk)
            data[2] = TG3_LOOPBACK_FAILED;
        goto done;
    }

    err = tg3_reset_hw(tp, 1);
    if (err) {
        data[0] = TG3_LOOPBACK_FAILED;
        data[1] = TG3_LOOPBACK_FAILED;
        if (do_extlpbk)
            data[2] = TG3_LOOPBACK_FAILED;
        goto done;
    }

    if (tg3_flag(tp, ENABLE_RSS)) {
        int i;

        /* Reroute all rx packets to the 1st queue */
        for (i = MAC_RSS_INDIR_TBL_0;
             i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
            tw32(i, 0x0);
    }

    /* HW errata - mac loopback fails in some cases on 5780.
     * Normal traffic and PHY loopback are not affected by
     * errata.  Also, the MAC loopback test is deprecated for
     * all newer ASIC revisions.
     */
    if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
        !tg3_flag(tp, CPMU_PRESENT)) {
        tg3_mac_loopback(tp, true);

        if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
            data[0] |= TG3_STD_LOOPBACK_FAILED;

        if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
            tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
            data[0] |= TG3_JMB_LOOPBACK_FAILED;

        tg3_mac_loopback(tp, false);
    }

    if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
        !tg3_flag(tp, USE_PHYLIB)) {
        int i;

        tg3_phy_lpbk_set(tp, 0, false);

        /* Wait for link */
        for (i = 0; i < 100; i++) {
            if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                break;
            mdelay(1);
        }

        if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
            data[1] |= TG3_STD_LOOPBACK_FAILED;
        if (tg3_flag(tp, TSO_CAPABLE) &&
            tg3_run_loopback(tp, ETH_FRAME_LEN, true))
            data[1] |= TG3_TSO_LOOPBACK_FAILED;
        if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
            tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
            data[1] |= TG3_JMB_LOOPBACK_FAILED;

        if (do_extlpbk) {
            tg3_phy_lpbk_set(tp, 0, true);

            /* All link indications report up, but the hardware
             * isn't really ready for about 20 msec.  Double it
             * to be sure.
             */
            mdelay(40);

            if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                data[2] |= TG3_STD_LOOPBACK_FAILED;
            if (tg3_flag(tp, TSO_CAPABLE) &&
                tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                data[2] |= TG3_TSO_LOOPBACK_FAILED;
            if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
                data[2] |= TG3_JMB_LOOPBACK_FAILED;
        }

        /* Re-enable gphy autopowerdown. */
        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
            tg3_phy_toggle_apd(tp, true);
    }

    err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
    tp->phy_flags |= eee_cap;

    return err;
}
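/* ethtool self-test entry point.  The TG3_NUM_TEST result slots are used
 * as: 0 nvram, 1 link, 2 registers, 3 memory, 4-6 the three loopback
 * modes, 7 interrupt.
 */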
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
    struct tg3 *tp = netdev_priv(dev);
    bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

    if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
        tg3_power_up(tp)) {
        etest->flags |= ETH_TEST_FL_FAILED;
        memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
        return;
    }

    memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

    if (tg3_test_nvram(tp) != 0) {
        etest->flags |= ETH_TEST_FL_FAILED;
        data[0] = 1;
    }

    if (!doextlpbk && tg3_test_link(tp)) {
        etest->flags |= ETH_TEST_FL_FAILED;
        data[1] = 1;
    }

    if (etest->flags & ETH_TEST_FL_OFFLINE) {
        int err, err2 = 0, irq_sync = 0;

        if (netif_running(dev)) {
            tg3_phy_stop(tp);
            tg3_netif_stop(tp);
            irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        tg3_halt(tp, RESET_KIND_SUSPEND, 1);
        err = tg3_nvram_lock(tp);
        tg3_halt_cpu(tp, RX_CPU_BASE);
        if (!tg3_flag(tp, 5705_PLUS))
            tg3_halt_cpu(tp, TX_CPU_BASE);
        if (!err)
            tg3_nvram_unlock(tp);

        if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
            tg3_phy_reset(tp);

        if (tg3_test_registers(tp) != 0) {
            etest->flags |= ETH_TEST_FL_FAILED;
            data[2] = 1;
        }

        if (tg3_test_memory(tp) != 0) {
            etest->flags |= ETH_TEST_FL_FAILED;
            data[3] = 1;
        }

        if (doextlpbk)
            etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

        if (tg3_test_loopback(tp, &data[4], doextlpbk))
            etest->flags |= ETH_TEST_FL_FAILED;

        tg3_full_unlock(tp);

        if (tg3_test_interrupt(tp) != 0) {
            etest->flags |= ETH_TEST_FL_FAILED;
            data[7] = 1;
        }

        tg3_full_lock(tp, 0);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        if (netif_running(dev)) {
            tg3_flag_set(tp, INIT_COMPLETE);
            err2 = tg3_restart_hw(tp, 1);
            if (!err2)
                tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        if (irq_sync && !err2)
            tg3_phy_start(tp);
    }
    if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
        tg3_power_down(tp);
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct mii_ioctl_data *data = if_mii(ifr);
    struct tg3 *tp = netdev_priv(dev);
    int err;

    if (tg3_flag(tp, USE_PHYLIB)) {
        struct phy_device *phydev;
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
            return -EAGAIN;
        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        return phy_mii_ioctl(phydev, ifr, cmd);
    }

    switch (cmd) {
    case SIOCGMIIPHY:
        data->phy_id = tp->phy_addr;

        /* fallthru */
    case SIOCGMIIREG: {
        u32 mii_regval;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
            break;            /* We have no PHY */

        if (!netif_running(dev))
            return -EAGAIN;

        spin_lock_bh(&tp->lock);
        err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
        spin_unlock_bh(&tp->lock);

        data->val_out = mii_regval;

        return err;
    }

    case SIOCSMIIREG:
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
            break;            /* We have no PHY */

        if (!netif_running(dev))
            return -EAGAIN;

        spin_lock_bh(&tp->lock);
        err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
        spin_unlock_bh(&tp->lock);

        return err;

    default:
        /* do nothing */
        break;
    }
    return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
    struct tg3 *tp = netdev_priv(dev);

    memcpy(ec, &tp->coal, sizeof(*ec));
    return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
    struct tg3 *tp = netdev_priv(dev);
    u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
    u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

    if (!tg3_flag(tp, 5705_PLUS)) {
        max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
        max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
        max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
        min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
    }

    if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
        (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
        (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
        (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
        (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
        (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
        (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
        (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
        (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
        (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
        return -EINVAL;

    /* No rx interrupts will be generated if both are zero */
    if ((ec->rx_coalesce_usecs == 0) &&
        (ec->rx_max_coalesced_frames == 0))
        return -EINVAL;

    /* No tx interrupts will be generated if both are zero */
    if ((ec->tx_coalesce_usecs == 0) &&
        (ec->tx_max_coalesced_frames == 0))
        return -EINVAL;

    /* Only copy relevant parameters, ignore all others. */
    tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
    tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
    tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
    tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
    tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
    tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
    tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
    tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
    tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

    if (netif_running(dev)) {
        tg3_full_lock(tp, 0);
        __tg3_set_coalesce(tp, &tp->coal);
        tg3_full_unlock(tp);
    }
    return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
    .get_settings       = tg3_get_settings,
    .set_settings       = tg3_set_settings,
    .get_drvinfo        = tg3_get_drvinfo,
    .get_regs_len       = tg3_get_regs_len,
    .get_regs           = tg3_get_regs,
    .get_wol            = tg3_get_wol,
    .set_wol            = tg3_set_wol,
    .get_msglevel       = tg3_get_msglevel,
    .set_msglevel       = tg3_set_msglevel,
    .nway_reset         = tg3_nway_reset,
    .get_link           = ethtool_op_get_link,
    .get_eeprom_len     = tg3_get_eeprom_len,
    .get_eeprom         = tg3_get_eeprom,
    .set_eeprom         = tg3_set_eeprom,
    .get_ringparam      = tg3_get_ringparam,
    .set_ringparam      = tg3_set_ringparam,
    .get_pauseparam     = tg3_get_pauseparam,
    .set_pauseparam     = tg3_set_pauseparam,
    .self_test          = tg3_self_test,
    .get_strings        = tg3_get_strings,
    .set_phys_id        = tg3_set_phys_id,
    .get_ethtool_stats  = tg3_get_ethtool_stats,
    .get_coalesce       = tg3_get_coalesce,
    .set_coalesce       = tg3_set_coalesce,
    .get_sset_count     = tg3_get_sset_count,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
    u32 cursize, val, magic;

    tp->nvram_size = EEPROM_CHIP_SIZE;

    if (tg3_nvram_read(tp, 0, &magic) != 0)
        return;

    if ((magic != TG3_EEPROM_MAGIC) &&
        ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
        ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
        return;

    /*
     * Size the chip by reading offsets at increasing powers of two.
     * When we encounter our validation signature, we know the addressing
     * has wrapped around, and thus have our chip size.
     */
    cursize = 0x10;

    while (cursize < tp->nvram_size) {
        if (tg3_nvram_read(tp, cursize, &val) != 0)
            return;

        if (val == magic)
            break;

        cursize <<= 1;
    }

    tp->nvram_size = cursize;
}
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
    u32 val;

    if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
        return;

    /* Selfboot format */
    if (val != TG3_EEPROM_MAGIC) {
        tg3_get_eeprom_size(tp);
        return;
    }

    if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
        if (val != 0) {
            /* This is confusing.  We want to operate on the
             * 16-bit value at offset 0xf2.  The tg3_nvram_read()
             * call will read from NVRAM and byteswap the data
             * according to the byteswapping settings for all
             * other register accesses.  This ensures the data we
             * want will always reside in the lower 16-bits.
             * However, the data in NVRAM is in LE format, which
             * means the data from the NVRAM read will always be
             * opposite the endianness of the CPU.  The 16-bit
             * byteswap then brings the data to CPU endianness.
             */
            tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
            return;
        }
    }
    tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
    u32 nvcfg1;

    nvcfg1 = tr32(NVRAM_CFG1);
    if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
        tg3_flag_set(tp, FLASH);
    } else {
        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
        tw32(NVRAM_CFG1, nvcfg1);
    }

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
        tg3_flag(tp, 5780_CLASS)) {
        switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
        case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
            tp->nvram_jedecnum = JEDEC_ATMEL;
            tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
            tg3_flag_set(tp, NVRAM_BUFFERED);
            break;
        case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
            tp->nvram_jedecnum = JEDEC_ATMEL;
            tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
            break;
        case FLASH_VENDOR_ATMEL_EEPROM:
            tp->nvram_jedecnum = JEDEC_ATMEL;
            tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
            tg3_flag_set(tp, NVRAM_BUFFERED);
            break;
        case FLASH_VENDOR_ST:
            tp->nvram_jedecnum = JEDEC_ST;
            tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
            tg3_flag_set(tp, NVRAM_BUFFERED);
            break;
        case FLASH_VENDOR_SAIFUN:
            tp->nvram_jedecnum = JEDEC_SAIFUN;
            tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
            break;
        case FLASH_VENDOR_SST_SMALL:
        case FLASH_VENDOR_SST_LARGE:
            tp->nvram_jedecnum = JEDEC_SST;
            tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
            break;
        }
    } else {
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
        tg3_flag_set(tp, NVRAM_BUFFERED);
    }
}
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
    switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
    case FLASH_5752PAGE_SIZE_256:
        tp->nvram_pagesize = 256;
        break;
    case FLASH_5752PAGE_SIZE_512:
        tp->nvram_pagesize = 512;
        break;
    case FLASH_5752PAGE_SIZE_1K:
        tp->nvram_pagesize = 1024;
        break;
    case FLASH_5752PAGE_SIZE_2K:
        tp->nvram_pagesize = 2048;
        break;
    case FLASH_5752PAGE_SIZE_4K:
        tp->nvram_pagesize = 4096;
        break;
    case FLASH_5752PAGE_SIZE_264:
        tp->nvram_pagesize = 264;
        break;
    case FLASH_5752PAGE_SIZE_528:
        tp->nvram_pagesize = 528;
        break;
    }
}
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
    u32 nvcfg1;

    nvcfg1 = tr32(NVRAM_CFG1);

    /* NVRAM protection for TPM */
    if (nvcfg1 & (1 << 27))
        tg3_flag_set(tp, PROTECTED_NVRAM);

    switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
    case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
    case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        break;
    case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);
        break;
    case FLASH_5752VENDOR_ST_M45PE10:
    case FLASH_5752VENDOR_ST_M45PE20:
    case FLASH_5752VENDOR_ST_M45PE40:
        tp->nvram_jedecnum = JEDEC_ST;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);
        break;
    }

    if (tg3_flag(tp, FLASH)) {
        tg3_nvram_get_pagesize(tp, nvcfg1);
    } else {
        /* For eeprom, set pagesize to maximum eeprom size */
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
        tw32(NVRAM_CFG1, nvcfg1);
    }
}
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
    u32 nvcfg1, protect = 0;

    nvcfg1 = tr32(NVRAM_CFG1);

    /* NVRAM protection for TPM */
    if (nvcfg1 & (1 << 27)) {
        tg3_flag_set(tp, PROTECTED_NVRAM);
        protect = 1;
    }

    nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
    switch (nvcfg1) {
    case FLASH_5755VENDOR_ATMEL_FLASH_1:
    case FLASH_5755VENDOR_ATMEL_FLASH_2:
    case FLASH_5755VENDOR_ATMEL_FLASH_3:
    case FLASH_5755VENDOR_ATMEL_FLASH_5:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);
        tp->nvram_pagesize = 264;
        if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
            nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
            tp->nvram_size = (protect ? 0x3e200 :
                              TG3_NVRAM_SIZE_512KB);
        else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
            tp->nvram_size = (protect ? 0x1f200 :
                              TG3_NVRAM_SIZE_256KB);
        else
            tp->nvram_size = (protect ? 0x1f200 :
                              TG3_NVRAM_SIZE_128KB);
        break;
    case FLASH_5752VENDOR_ST_M45PE10:
    case FLASH_5752VENDOR_ST_M45PE20:
    case FLASH_5752VENDOR_ST_M45PE40:
        tp->nvram_jedecnum = JEDEC_ST;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);
        tp->nvram_pagesize = 256;
        if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
            tp->nvram_size = (protect ?
                              TG3_NVRAM_SIZE_64KB :
                              TG3_NVRAM_SIZE_128KB);
        else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
            tp->nvram_size = (protect ?
                              TG3_NVRAM_SIZE_64KB :
                              TG3_NVRAM_SIZE_256KB);
        else
            tp->nvram_size = (protect ?
                              TG3_NVRAM_SIZE_128KB :
                              TG3_NVRAM_SIZE_512KB);
        break;
    }
}
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
    u32 nvcfg1;

    nvcfg1 = tr32(NVRAM_CFG1);

    switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
    case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
    case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
    case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
    case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
        tw32(NVRAM_CFG1, nvcfg1);
        break;
    case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
    case FLASH_5755VENDOR_ATMEL_FLASH_1:
    case FLASH_5755VENDOR_ATMEL_FLASH_2:
    case FLASH_5755VENDOR_ATMEL_FLASH_3:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);
        tp->nvram_pagesize = 264;
        break;
    case FLASH_5752VENDOR_ST_M45PE10:
    case FLASH_5752VENDOR_ST_M45PE20:
    case FLASH_5752VENDOR_ST_M45PE40:
        tp->nvram_jedecnum = JEDEC_ST;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);
        tp->nvram_pagesize = 256;
        break;
    }
}
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
    u32 nvcfg1, protect = 0;

    nvcfg1 = tr32(NVRAM_CFG1);

    /* NVRAM protection for TPM */
    if (nvcfg1 & (1 << 27)) {
        tg3_flag_set(tp, PROTECTED_NVRAM);
        protect = 1;
    }

    nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
    switch (nvcfg1) {
    case FLASH_5761VENDOR_ATMEL_ADB021D:
    case FLASH_5761VENDOR_ATMEL_ADB041D:
    case FLASH_5761VENDOR_ATMEL_ADB081D:
    case FLASH_5761VENDOR_ATMEL_ADB161D:
    case FLASH_5761VENDOR_ATMEL_MDB021D:
    case FLASH_5761VENDOR_ATMEL_MDB041D:
    case FLASH_5761VENDOR_ATMEL_MDB081D:
    case FLASH_5761VENDOR_ATMEL_MDB161D:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);
        tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
        tp->nvram_pagesize = 256;
        break;
    case FLASH_5761VENDOR_ST_A_M45PE20:
    case FLASH_5761VENDOR_ST_A_M45PE40:
    case FLASH_5761VENDOR_ST_A_M45PE80:
    case FLASH_5761VENDOR_ST_A_M45PE16:
    case FLASH_5761VENDOR_ST_M_M45PE20:
    case FLASH_5761VENDOR_ST_M_M45PE40:
    case FLASH_5761VENDOR_ST_M_M45PE80:
    case FLASH_5761VENDOR_ST_M_M45PE16:
        tp->nvram_jedecnum = JEDEC_ST;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);
        tp->nvram_pagesize = 256;
        break;
    }

    if (protect) {
        tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
    } else {
        switch (nvcfg1) {
        case FLASH_5761VENDOR_ATMEL_ADB161D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
        case FLASH_5761VENDOR_ST_A_M45PE16:
        case FLASH_5761VENDOR_ST_M_M45PE16:
            tp->nvram_size = TG3_NVRAM_SIZE_2MB;
            break;
        case FLASH_5761VENDOR_ATMEL_ADB081D:
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ST_A_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE80:
            tp->nvram_size = TG3_NVRAM_SIZE_1MB;
            break;
        case FLASH_5761VENDOR_ATMEL_ADB041D:
        case FLASH_5761VENDOR_ATMEL_MDB041D:
        case FLASH_5761VENDOR_ST_A_M45PE40:
        case FLASH_5761VENDOR_ST_M_M45PE40:
            tp->nvram_size = TG3_NVRAM_SIZE_512KB;
            break;
        case FLASH_5761VENDOR_ATMEL_ADB021D:
        case FLASH_5761VENDOR_ATMEL_MDB021D:
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE20:
            tp->nvram_size = TG3_NVRAM_SIZE_256KB;
            break;
        }
    }
}
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
    tp->nvram_jedecnum = JEDEC_ATMEL;
    tg3_flag_set(tp, NVRAM_BUFFERED);
    tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
    u32 nvcfg1;

    nvcfg1 = tr32(NVRAM_CFG1);

    switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
    case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
    case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
        tw32(NVRAM_CFG1, nvcfg1);
        return;
    case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
    case FLASH_57780VENDOR_ATMEL_AT45DB011D:
    case FLASH_57780VENDOR_ATMEL_AT45DB011B:
    case FLASH_57780VENDOR_ATMEL_AT45DB021D:
    case FLASH_57780VENDOR_ATMEL_AT45DB021B:
    case FLASH_57780VENDOR_ATMEL_AT45DB041D:
    case FLASH_57780VENDOR_ATMEL_AT45DB041B:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
            tp->nvram_size = TG3_NVRAM_SIZE_128KB;
            break;
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
            tp->nvram_size = TG3_NVRAM_SIZE_256KB;
            break;
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
            tp->nvram_size = TG3_NVRAM_SIZE_512KB;
            break;
        }
        break;
    case FLASH_5752VENDOR_ST_M45PE10:
    case FLASH_5752VENDOR_ST_M45PE20:
    case FLASH_5752VENDOR_ST_M45PE40:
        tp->nvram_jedecnum = JEDEC_ST;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ST_M45PE10:
            tp->nvram_size = TG3_NVRAM_SIZE_128KB;
            break;
        case FLASH_5752VENDOR_ST_M45PE20:
            tp->nvram_size = TG3_NVRAM_SIZE_256KB;
            break;
        case FLASH_5752VENDOR_ST_M45PE40:
            tp->nvram_size = TG3_NVRAM_SIZE_512KB;
            break;
        }
        break;
    default:
        tg3_flag_set(tp, NO_NVRAM);
        return;
    }

    tg3_nvram_get_pagesize(tp, nvcfg1);
    if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
        tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
    u32 nvcfg1;

    nvcfg1 = tr32(NVRAM_CFG1);

    switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
    case FLASH_5717VENDOR_ATMEL_EEPROM:
    case FLASH_5717VENDOR_MICRO_EEPROM:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
        tw32(NVRAM_CFG1, nvcfg1);
        return;
    case FLASH_5717VENDOR_ATMEL_MDB011D:
    case FLASH_5717VENDOR_ATMEL_ADB011B:
    case FLASH_5717VENDOR_ATMEL_ADB011D:
    case FLASH_5717VENDOR_ATMEL_MDB021D:
    case FLASH_5717VENDOR_ATMEL_ADB021B:
    case FLASH_5717VENDOR_ATMEL_ADB021D:
    case FLASH_5717VENDOR_ATMEL_45USPT:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_MDB021D:
            /* Detect size with tg3_nvram_get_size() */
            break;
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
            tp->nvram_size = TG3_NVRAM_SIZE_256KB;
            break;
        default:
            tp->nvram_size = TG3_NVRAM_SIZE_128KB;
            break;
        }
        break;
    case FLASH_5717VENDOR_ST_M_M25PE10:
    case FLASH_5717VENDOR_ST_A_M25PE10:
    case FLASH_5717VENDOR_ST_M_M45PE10:
    case FLASH_5717VENDOR_ST_A_M45PE10:
    case FLASH_5717VENDOR_ST_M_M25PE20:
    case FLASH_5717VENDOR_ST_A_M25PE20:
    case FLASH_5717VENDOR_ST_M_M45PE20:
    case FLASH_5717VENDOR_ST_A_M45PE20:
    case FLASH_5717VENDOR_ST_25USPT:
    case FLASH_5717VENDOR_ST_45USPT:
        tp->nvram_jedecnum = JEDEC_ST;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
            /* Detect size with tg3_nvram_get_size() */
            break;
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
            tp->nvram_size = TG3_NVRAM_SIZE_256KB;
            break;
        default:
            tp->nvram_size = TG3_NVRAM_SIZE_128KB;
            break;
        }
        break;
    default:
        tg3_flag_set(tp, NO_NVRAM);
        return;
    }

    tg3_nvram_get_pagesize(tp, nvcfg1);
    if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
        tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
    u32 nvcfg1, nvmpinstrp;

    nvcfg1 = tr32(NVRAM_CFG1);
    nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

    switch (nvmpinstrp) {
    case FLASH_5720_EEPROM_HD:
    case FLASH_5720_EEPROM_LD:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);

        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
        tw32(NVRAM_CFG1, nvcfg1);
        if (nvmpinstrp == FLASH_5720_EEPROM_HD)
            tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
        else
            tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
        return;
    case FLASH_5720VENDOR_M_ATMEL_DB011D:
    case FLASH_5720VENDOR_A_ATMEL_DB011B:
    case FLASH_5720VENDOR_A_ATMEL_DB011D:
    case FLASH_5720VENDOR_M_ATMEL_DB021D:
    case FLASH_5720VENDOR_A_ATMEL_DB021B:
    case FLASH_5720VENDOR_A_ATMEL_DB021D:
    case FLASH_5720VENDOR_M_ATMEL_DB041D:
    case FLASH_5720VENDOR_A_ATMEL_DB041B:
    case FLASH_5720VENDOR_A_ATMEL_DB041D:
    case FLASH_5720VENDOR_M_ATMEL_DB081D:
    case FLASH_5720VENDOR_A_ATMEL_DB081D:
    case FLASH_5720VENDOR_ATMEL_45USPT:
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);

        switch (nvmpinstrp) {
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
            tp->nvram_size = TG3_NVRAM_SIZE_256KB;
            break;
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
            tp->nvram_size = TG3_NVRAM_SIZE_512KB;
            break;
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
            tp->nvram_size = TG3_NVRAM_SIZE_1MB;
            break;
        default:
            tp->nvram_size = TG3_NVRAM_SIZE_128KB;
            break;
        }
        break;
    case FLASH_5720VENDOR_M_ST_M25PE10:
    case FLASH_5720VENDOR_M_ST_M45PE10:
    case FLASH_5720VENDOR_A_ST_M25PE10:
    case FLASH_5720VENDOR_A_ST_M45PE10:
    case FLASH_5720VENDOR_M_ST_M25PE20:
    case FLASH_5720VENDOR_M_ST_M45PE20:
    case FLASH_5720VENDOR_A_ST_M25PE20:
    case FLASH_5720VENDOR_A_ST_M45PE20:
    case FLASH_5720VENDOR_M_ST_M25PE40:
    case FLASH_5720VENDOR_M_ST_M45PE40:
    case FLASH_5720VENDOR_A_ST_M25PE40:
    case FLASH_5720VENDOR_A_ST_M45PE40:
    case FLASH_5720VENDOR_M_ST_M25PE80:
    case FLASH_5720VENDOR_M_ST_M45PE80:
    case FLASH_5720VENDOR_A_ST_M25PE80:
    case FLASH_5720VENDOR_A_ST_M45PE80:
    case FLASH_5720VENDOR_ST_25USPT:
    case FLASH_5720VENDOR_ST_45USPT:
        tp->nvram_jedecnum = JEDEC_ST;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tg3_flag_set(tp, FLASH);

        switch (nvmpinstrp) {
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
            tp->nvram_size = TG3_NVRAM_SIZE_256KB;
            break;
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
            tp->nvram_size = TG3_NVRAM_SIZE_512KB;
            break;
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
            tp->nvram_size = TG3_NVRAM_SIZE_1MB;
            break;
        default:
            tp->nvram_size = TG3_NVRAM_SIZE_128KB;
            break;
        }
        break;
    default:
        tg3_flag_set(tp, NO_NVRAM);
        return;
    }

    tg3_nvram_get_pagesize(tp, nvcfg1);
    if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
        tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
    tw32_f(GRC_EEPROM_ADDR,
         (EEPROM_ADDR_FSM_RESET |
          (EEPROM_DEFAULT_CLOCK_PERIOD <<
           EEPROM_ADDR_CLKPERD_SHIFT)));

    msleep(1);

    /* Enable seeprom accesses. */
    tw32_f(GRC_LOCAL_CTRL,
         tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
    udelay(100);

    if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
        GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
        tg3_flag_set(tp, NVRAM);

        if (tg3_nvram_lock(tp)) {
            netdev_warn(tp->dev,
                        "Cannot get nvram lock, %s failed\n",
                        __func__);
            return;
        }
        tg3_enable_nvram_access(tp);

        tp->nvram_size = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
            tg3_get_5752_nvram_info(tp);
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
            tg3_get_5755_nvram_info(tp);
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
            tg3_get_5787_nvram_info(tp);
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
            tg3_get_5761_nvram_info(tp);
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
            tg3_get_5906_nvram_info(tp);
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
            tg3_get_57780_nvram_info(tp);
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
            tg3_get_5717_nvram_info(tp);
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
            tg3_get_5720_nvram_info(tp);
        else
            tg3_get_nvram_info(tp);

        if (tp->nvram_size == 0)
            tg3_get_nvram_size(tp);

        tg3_disable_nvram_access(tp);
        tg3_nvram_unlock(tp);

    } else {
        tg3_flag_clear(tp, NVRAM);
        tg3_flag_clear(tp, NVRAM_BUFFERED);

        tg3_get_eeprom_size(tp);
    }
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                              u32 offset, u32 len, u8 *buf)
{
    int i, j, rc = 0;
    u32 val;

    for (i = 0; i < len; i += 4) {
        u32 addr;
        __be32 data;

        addr = offset + i;

        memcpy(&data, buf + i, 4);

        /*
         * The SEEPROM interface expects the data to always be opposite
         * the native endian format.  We accomplish this by reversing
         * all the operations that would have been performed on the
         * data from a call to tg3_nvram_read_be32().
         */
        tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

        val = tr32(GRC_EEPROM_ADDR);
        tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

        val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                 EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR, val |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             (addr & EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_START |
             EEPROM_ADDR_WRITE);

        for (j = 0; j < 1000; j++) {
            val = tr32(GRC_EEPROM_ADDR);

            if (val & EEPROM_ADDR_COMPLETE)
                break;
            msleep(1);
        }
        if (!(val & EEPROM_ADDR_COMPLETE)) {
            rc = -EBUSY;
            break;
        }
    }

    return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
        u8 *buf)
{
    int ret = 0;
    u32 pagesize = tp->nvram_pagesize;
    u32 pagemask = pagesize - 1;
    u32 nvram_cmd;
    u8 *tmp;

    tmp = kmalloc(pagesize, GFP_KERNEL);
    if (tmp == NULL)
        return -ENOMEM;

    while (len) {
        int j;
        u32 phy_addr, page_off, size;

        phy_addr = offset & ~pagemask;

        for (j = 0; j < pagesize; j += 4) {
            ret = tg3_nvram_read_be32(tp, phy_addr + j,
                                      (__be32 *) (tmp + j));
            if (ret)
                break;
        }
        if (ret)
            break;

        page_off = offset & pagemask;
        size = pagesize;
        if (len < size)
            size = len;

        len -= size;

        memcpy(tmp + page_off, buf, size);

        offset = offset + (pagesize - page_off);

        tg3_enable_nvram_access(tp);

        /*
         * Before we can erase the flash page, we need
         * to issue a special "write enable" command.
         */
        nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

        if (tg3_nvram_exec_cmd(tp, nvram_cmd))
            break;

        /* Erase the target page */
        tw32(NVRAM_ADDR, phy_addr);

        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
            NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

        if (tg3_nvram_exec_cmd(tp, nvram_cmd))
            break;

        /* Issue another write enable to start the write. */
        nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

        if (tg3_nvram_exec_cmd(tp, nvram_cmd))
            break;

        for (j = 0; j < pagesize; j += 4) {
            __be32 data;

            data = *((__be32 *) (tmp + j));

            tw32(NVRAM_WRDATA, be32_to_cpu(data));

            tw32(NVRAM_ADDR, phy_addr + j);

            nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                NVRAM_CMD_WR;

            if (j == 0)
                nvram_cmd |= NVRAM_CMD_FIRST;
            else if (j == (pagesize - 4))
                nvram_cmd |= NVRAM_CMD_LAST;

            if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                break;
        }
        if (ret)
            break;
    }

    nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
    tg3_nvram_exec_cmd(tp, nvram_cmd);

    kfree(tmp);

    return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
        u8 *buf)
{
    int i, ret = 0;

    for (i = 0; i < len; i += 4, offset += 4) {
        u32 page_off, phy_addr, nvram_cmd;
        __be32 data;

        memcpy(&data, buf + i, 4);
        tw32(NVRAM_WRDATA, be32_to_cpu(data));

        page_off = offset % tp->nvram_pagesize;

        phy_addr = tg3_nvram_phys_addr(tp, offset);

        tw32(NVRAM_ADDR, phy_addr);

        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

        if (page_off == 0 || i == 0)
            nvram_cmd |= NVRAM_CMD_FIRST;
        if (page_off == (tp->nvram_pagesize - 4))
            nvram_cmd |= NVRAM_CMD_LAST;

        if (i == (len - 4))
            nvram_cmd |= NVRAM_CMD_LAST;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
            !tg3_flag(tp, 5755_PLUS) &&
            (tp->nvram_jedecnum == JEDEC_ST) &&
            (nvram_cmd & NVRAM_CMD_FIRST)) {

            if ((ret = tg3_nvram_exec_cmd(tp,
                NVRAM_CMD_WREN | NVRAM_CMD_GO |
                NVRAM_CMD_DONE)))
                break;
        }
        if (!tg3_flag(tp, FLASH)) {
            /* We always do complete word writes to eeprom. */
            nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
        }

        if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
            break;
    }
    return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
    int ret;

    if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
               ~GRC_LCLCTRL_GPIO_OUTPUT1);
        udelay(40);
    }

    if (!tg3_flag(tp, NVRAM)) {
        ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
    } else {
        u32 grc_mode;

        ret = tg3_nvram_lock(tp);
        if (ret)
            return ret;

        tg3_enable_nvram_access(tp);
        if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
            tw32(NVRAM_WRITE1, 0x406);

        grc_mode = tr32(GRC_MODE);
        tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

        if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
            ret = tg3_nvram_write_block_buffered(tp, offset, len,
                buf);
        } else {
            ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
                buf);
        }

        grc_mode = tr32(GRC_MODE);
        tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

        tg3_disable_nvram_access(tp);
        tg3_nvram_unlock(tp);
    }

    if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(40);
    }

    return ret;
}
struct subsys_tbl_ent {
    u16 subsys_vendor, subsys_devid;
    u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
    /* Broadcom boards. */
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
    { TG3PCI_SUBVENDOR_ID_BROADCOM,
      TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

    /* 3com boards. */
    { TG3PCI_SUBVENDOR_ID_3COM,
      TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
    { TG3PCI_SUBVENDOR_ID_3COM,
      TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
    { TG3PCI_SUBVENDOR_ID_3COM,
      TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
    { TG3PCI_SUBVENDOR_ID_3COM,
      TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
    { TG3PCI_SUBVENDOR_ID_3COM,
      TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

    /* DELL boards. */
    { TG3PCI_SUBVENDOR_ID_DELL,
      TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
    { TG3PCI_SUBVENDOR_ID_DELL,
      TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
    { TG3PCI_SUBVENDOR_ID_DELL,
      TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
    { TG3PCI_SUBVENDOR_ID_DELL,
      TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

    /* Compaq boards. */
    { TG3PCI_SUBVENDOR_ID_COMPAQ,
      TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
    { TG3PCI_SUBVENDOR_ID_COMPAQ,
      TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
    { TG3PCI_SUBVENDOR_ID_COMPAQ,
      TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
    { TG3PCI_SUBVENDOR_ID_COMPAQ,
      TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
    { TG3PCI_SUBVENDOR_ID_COMPAQ,
      TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

    /* IBM boards. */
    { TG3PCI_SUBVENDOR_ID_IBM,
      TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
        if ((subsys_id_to_phy_id[i].subsys_vendor ==
             tp->pdev->subsystem_vendor) &&
            (subsys_id_to_phy_id[i].subsys_devid ==
             tp->pdev->subsystem_device))
            return &subsys_id_to_phy_id[i];
    }
    return NULL;
}
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
    u32 val;

    tp->phy_id = TG3_PHY_ID_INVALID;
    tp->led_ctrl = LED_CTRL_MODE_PHY_1;

    /* Assume an onboard device and WOL capable by default.  */
    tg3_flag_set(tp, EEPROM_WRITE_PROT);
    tg3_flag_set(tp, WOL_CAP);

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
        if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
            tg3_flag_clear(tp, EEPROM_WRITE_PROT);
            tg3_flag_set(tp, IS_NIC);
        }
        val = tr32(VCPU_CFGSHDW);
        if (val & VCPU_CFGSHDW_ASPM_DBNC)
            tg3_flag_set(tp, ASPM_WORKAROUND);
        if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
            (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
            tg3_flag_set(tp, WOL_ENABLE);
            device_set_wakeup_enable(&tp->pdev->dev, true);
        }
        goto done;
    }

    tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
    if (val == NIC_SRAM_DATA_SIG_MAGIC) {
        u32 nic_cfg, led_cfg;
        u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
        int eeprom_phy_serdes = 0;

        tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
        tp->nic_sram_data_cfg = nic_cfg;

        tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
        ver >>= NIC_SRAM_DATA_VER_SHIFT;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
            (ver > 0) && (ver < 0x100))
            tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
            tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

        if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
            NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
            eeprom_phy_serdes = 1;

        tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
        if (nic_phy_id != 0) {
            u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
            u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

            eeprom_phy_id  = (id1 >> 16) << 10;
            eeprom_phy_id |= (id2 & 0xfc00) << 16;
            eeprom_phy_id |= (id2 & 0x03ff) << 0;
        } else
            eeprom_phy_id = 0;

        tp->phy_id = eeprom_phy_id;
        if (eeprom_phy_serdes) {
            if (!tg3_flag(tp, 5705_PLUS))
                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
            else
                tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
        }

        if (tg3_flag(tp, 5750_PLUS))
            led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                        SHASTA_EXT_LED_MODE_MASK);
        else
            led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

        switch (led_cfg) {
        default:
        case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
            tp->led_ctrl = LED_CTRL_MODE_PHY_1;
            break;

        case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
            tp->led_ctrl = LED_CTRL_MODE_PHY_2;
            break;

        case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
            tp->led_ctrl = LED_CTRL_MODE_MAC;

            /* Default to PHY_1_MODE if 0 (MAC_MODE) is
             * read on some older 5700/5701 bootcode.
             */
            if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                ASIC_REV_5700 ||
                GET_ASIC_REV(tp->pci_chip_rev_id) ==
                ASIC_REV_5701)
                tp->led_ctrl = LED_CTRL_MODE_PHY_1;

            break;

        case SHASTA_EXT_LED_SHARED:
            tp->led_ctrl = LED_CTRL_MODE_SHARED;
            if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
                tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                 LED_CTRL_MODE_PHY_2);
            break;

        case SHASTA_EXT_LED_MAC:
            tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
            break;

        case SHASTA_EXT_LED_COMBO:
            tp->led_ctrl = LED_CTRL_MODE_COMBO;
            if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                 LED_CTRL_MODE_PHY_2);
            break;
        }

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
            tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
            tp->led_ctrl = LED_CTRL_MODE_PHY_2;

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
            tp->led_ctrl = LED_CTRL_MODE_PHY_1;

        if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
            tg3_flag_set(tp, EEPROM_WRITE_PROT);
            if ((tp->pdev->subsystem_vendor ==
                 PCI_VENDOR_ID_ARIMA) &&
                (tp->pdev->subsystem_device == 0x205a ||
                 tp->pdev->subsystem_device == 0x2063))
                tg3_flag_clear(tp, EEPROM_WRITE_PROT);
        } else {
            tg3_flag_clear(tp, EEPROM_WRITE_PROT);
            tg3_flag_set(tp, IS_NIC);
        }

        if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
            tg3_flag_set(tp, ENABLE_ASF);
            if (tg3_flag(tp, 5750_PLUS))
                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
        }

        if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
            tg3_flag(tp, 5750_PLUS))
            tg3_flag_set(tp, ENABLE_APE);

        if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
            !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
            tg3_flag_clear(tp, WOL_CAP);

        if (tg3_flag(tp, WOL_CAP) &&
            (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
            tg3_flag_set(tp, WOL_ENABLE);
            device_set_wakeup_enable(&tp->pdev->dev, true);
        }

        if (cfg2 & (1 << 17))
            tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

        /* serdes signal pre-emphasis in register 0x590 set by */
        /* bootcode if bit 18 is set */
        if (cfg2 & (1 << 18))
            tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

        if ((tg3_flag(tp, 57765_PLUS) ||
             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
              GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
            (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
            tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

        if (tg3_flag(tp, PCI_EXPRESS) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
            u32 cfg3;

            tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
            if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
                tg3_flag_set(tp, ASPM_WORKAROUND);
        }

        if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
            tg3_flag_set(tp, RGMII_INBAND_DISABLE);
        if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
            tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
        if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
            tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
    }
done:
    if (tg3_flag(tp, WOL_CAP))
        device_set_wakeup_enable(&tp->pdev->dev,
                                 tg3_flag(tp, WOL_ENABLE));
    else
        device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
    int i;
    u32 val;

    tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
    tw32(OTP_CTRL, cmd);

    /* Wait for up to 1 ms for command to execute. */
    for (i = 0; i < 100; i++) {
        val = tr32(OTP_STATUS);
        if (val & OTP_STATUS_CMD_DONE)
            break;
        udelay(10);
    }

    return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
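/* Seed the link configuration with everything this PHY could advertise:
 * gigabit modes unless the PHY is 10/100-only, the 10/100 copper modes
 * unless this is a SERDES device (which gets ADVERTISED_FIBRE instead).
 * Speed and duplex are left INVALID so that autonegotiation results can
 * populate them later.
 */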
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg |
		  ADVERTISED_Pause;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
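/* How the 32-bit tg3 PHY ID is packed from the two MII ID words:
 * bits 31:26 come from the top of PHYSID2, bits 25:10 from PHYSID1,
 * and bits 9:0 from the low (model/revision) part of PHYSID2.  As a
 * purely illustrative example (not a real PHY ID), PHYSID1 = 0x0020
 * and PHYSID2 = 0x60b0 would combine to
 * (0x6000 << 16) | (0x0020 << 10) | 0x00b0 = 0x600080b0.
 */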
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
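/* Walk the PCI VPD image: locate the read-only (LRDT) section, then
 * pull individual keyword fields out of it.  The "1028" comparison
 * below matches Dell's PCI vendor ID rendered as ASCII; on such boards
 * the VENDOR0 keyword carries a firmware version string.  The PARTNO
 * keyword supplies the board part number, with hardcoded per-device
 * fallbacks when no usable VPD is found.
 */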
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
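/* Selfboot format 1 stores its engineering/development header at a
 * revision-specific offset; the switch below maps each known format
 * revision to that offset, and the version is then rendered as
 * " vMAJ.MIN" plus an optional build letter ('a' for build 1, and so
 * on, which is why build is capped at 26).
 */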
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
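/* Scan the NVM directory for the ASF initialization entry, translate
 * its load address into an NVRAM offset, and append up to 16 bytes of
 * the embedded version string to tp->fw_ver.
 */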
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
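/* Top-level firmware version assembly.  If VPD already seeded
 * tp->fw_ver, only the bootcode portion is read here; otherwise the
 * NVRAM magic number selects between bootcode, selfboot, and hardware
 * selfboot version formats, after which any APE/ASF management
 * firmware version is appended.
 */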
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
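/* Host bridges known to reorder posted writes to the mailbox
 * registers; matched via pci_dev_present() in tg3_get_invariants()
 * below to decide whether mailbox writes must be flushed.
 */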
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off at all times, so it never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}
	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tg3_flag_set(tp, 5780_CLASS);
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
	/* Determine TSO capabilities */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tp->fw_needed) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;
	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_flag_set(tp, 4K_FIFO_LIMIT);

	if (tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		tp->pcie_readrq = 4096;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tp->pcie_readrq = 2048;

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}
	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}
	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
			tp->pci_fn = tp->pci_fn ? 1 : 0;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
		}
	}
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, CPMU_PRESENT);

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;
	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);
	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);
	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}
	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);
	tp->rx_offset = NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
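/* On SPARC, prefer the firmware-provided "local-mac-address" device
 * tree property; if it is absent, fall back to the system IDPROM
 * ethernet address.
 */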
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
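/* DMA boundary tuning.  The "goal" below is chosen per architecture:
 * PPC64/IA64/PARISC prefer bursting across multiple cachelines,
 * SPARC64/ALPHA want bursts confined to a single cacheline, and other
 * architectures leave the boundary bits untouched.
 */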
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
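/* Drive one DMA transaction through the chip's internal buffer
 * descriptor machinery: the descriptor is written into NIC SRAM
 * through the PCI memory window, the read- or write-side DMA engine
 * is enabled depending on the direction, and the matching completion
 * FIFO is polled (40 tries, 100 usec apart) for our descriptor.
 */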
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
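/* DMA self-test: fill a TEST_BUFFER_SIZE (0x2000 byte) buffer with a
 * counting pattern, DMA it to NIC SRAM and back with the maximum write
 * burst configured, and tighten the write boundary to 16 bytes if the
 * read-back shows corruption -- the signature of the 5700/5701 write
 * DMA bug.
 */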
14959 static int __devinit
tg3_test_dma(struct tg3
*tp
)
14961 dma_addr_t buf_dma
;
14962 u32
*buf
, saved_dma_rwctrl
;
14965 buf
= dma_alloc_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
,
14966 &buf_dma
, GFP_KERNEL
);
14972 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
14973 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
14975 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
14977 if (tg3_flag(tp
, 57765_PLUS
))
14980 if (tg3_flag(tp
, PCI_EXPRESS
)) {
14981 /* DMA read watermark not used on PCIE */
14982 tp
->dma_rwctrl
|= 0x00180000;
14983 } else if (!tg3_flag(tp
, PCIX_MODE
)) {
14984 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
14985 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)
14986 tp
->dma_rwctrl
|= 0x003f0000;
14988 tp
->dma_rwctrl
|= 0x003f000f;
14990 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
14991 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
14992 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
14993 u32 read_water
= 0x7;
14995 /* If the 5704 is behind the EPB bridge, we can
14996 * do the less restrictive ONE_DMA workaround for
14997 * better performance.
14999 if (tg3_flag(tp
, 40BIT_DMA_BUG
) &&
15000 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
15001 tp
->dma_rwctrl
|= 0x8000;
15002 else if (ccval
== 0x6 || ccval
== 0x7)
15003 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
15005 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
)
15007 /* Set bit 23 to enable PCIX hw bug fix */
15009 (read_water
<< DMA_RWCTRL_READ_WATER_SHIFT
) |
15010 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT
) |
15012 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
) {
15013 /* 5780 always in PCIX mode */
15014 tp
->dma_rwctrl
|= 0x00144000;
15015 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
15016 /* 5714 always in PCIX mode */
15017 tp
->dma_rwctrl
|= 0x00148000;
15019 tp
->dma_rwctrl
|= 0x001b000f;
15023 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
15024 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
15025 tp
->dma_rwctrl
&= 0xfffffff0;
15027 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
15028 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
15029 /* Remove this if it causes problems for some boards. */
15030 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
15032 /* On 5700/5701 chips, we need to set this bit.
15033 * Otherwise the chip will issue cacheline transactions
15034 * to streamable DMA memory with not all the byte
15035 * enables turned on. This is an error on several
15036 * RISC PCI controllers, in particular sparc64.
15038 * On 5703/5704 chips, this bit has been reassigned
15039 * a different meaning. In particular, it is used
15040 * on those chips to enable a PCI-X workaround.
15042 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
15045 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
15048 /* Unneeded, already done by tg3_get_invariants. */
15049 tg3_switch_clocks(tp
);
15052 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
15053 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
15056 /* It is best to perform DMA test with maximum write burst size
15057 * to expose the 5700/5701 write DMA bug.
15059 saved_dma_rwctrl
= tp
->dma_rwctrl
;
15060 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
15061 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
15066 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++)
15069 /* Send the buffer to the chip. */
15070 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 1);
15072 dev_err(&tp
->pdev
->dev
,
15073 "%s: Buffer write failed. err = %d\n",
15079 /* validate data reached card RAM correctly. */
15080 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
15082 tg3_read_mem(tp
, 0x2100 + (i
*4), &val
);
15083 if (le32_to_cpu(val
) != p
[i
]) {
15084 dev_err(&tp
->pdev
->dev
,
15085 "%s: Buffer corrupted on device! "
15086 "(%d != %d)\n", __func__
, val
, i
);
15087 /* ret = -ENODEV here? */
15092 /* Now read it back. */
15093 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 0);
15095 dev_err(&tp
->pdev
->dev
, "%s: Buffer read failed. "
15096 "err = %d\n", __func__
, ret
);
		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
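
/* tg3_init_bufmgr_config() below chooses the buffer-manager watermark
 * defaults per chip family.  As a reading of the code (not of vendor
 * documentation): the MACRX low-water values are the thresholds at
 * which the on-chip buffer manager starts flow control, the high-water
 * values are where it stops accepting frames, and the *_jumbo variants
 * apply when jumbo frames are enabled.
 */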
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
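
/* Note that tg3_bus_string() below fills a caller-supplied buffer with
 * strcpy()/strcat() and performs no bounds checking.  Its only caller,
 * tg3_init_one(), passes a 40-byte stack buffer, which comfortably
 * holds the longest string composed here (e.g. a PCIX:133MHz:64-bit
 * style variant).
 */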
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
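
/* PCI encodes the function number in the low three bits of devfn, so
 * (devfn & ~7) below is function 0 of the same slot; the loop then
 * probes all eight possible functions to find the other port of a
 * dual-port board such as the 5704.
 */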
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}
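
/* The values tg3_init_coal() installs below are exactly what userspace
 * sees via ETHTOOL_GCOALESCE ("ethtool -c ethX") until they are changed
 * with "ethtool -C".  The *_irq and stats values are zeroed on 5705+
 * parts, presumably because those chips lack the corresponding
 * host-coalescing controls.
 */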
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
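
/* These are the stack-facing entry points; tg3_init_one() attaches
 * them through dev->netdev_ops, after which the core networking code
 * calls them for open/close, transmit, MTU changes and so on.
 */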
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
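
/* tg3_init_one() is the PCI probe routine.  The sequence is: enable
 * the PCI device and claim its regions, allocate the net_device, map
 * BAR 0 (plus BAR 2 on APE-capable chips), read the chip invariants,
 * choose DMA masks, run the DMA engine test, set up the per-vector
 * mailboxes and coalescing defaults, and finally register the netdev.
 * Every failure path unwinds through the err_out_* labels at the end.
 */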
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
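
	/* tp->regs is the BAR 0 MMIO mapping used by the tr32()/tw32()
	 * register accessors throughout the driver (register access may
	 * also go through the indirect-access window enabled via
	 * misc_host_ctrl above).
	 */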
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
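
	/* At this point the streaming DMA mask is 64, 40 or 32 bits in
	 * decreasing order of preference, and the consistent-allocation
	 * mask is never wider than what the chip can persistently
	 * address.
	 */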
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down.  Otherwise the DMA self test will enable the WDMAC and
	 * we'll see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);
	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
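
/* The err_out_* labels above unwind in strict reverse order of the
 * corresponding setup steps, so a failure at any point releases
 * exactly the resources acquired up to that point.
 */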
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
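
/* Suspend/resume are implemented as dev_pm_ops callbacks (wired up
 * through SIMPLE_DEV_PM_OPS and .driver.pm below) rather than the
 * legacy PCI .suspend/.resume hooks, so the PCI core takes care of
 * the config-space save/restore around them.
 */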
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
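
/* The init/exit pair below is the standard driver register/unregister
 * boilerplate.  On kernels that provide the module_pci_driver() helper
 * (v3.4 and later, so not assumed here) the same thing could be
 * written as the single line sketched below.
 */
#if 0
module_pci_driver(tg3_driver);
#endif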
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);