/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
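
/* Illustrative usage (not part of the original file): the wrappers keep
 * flag checks readable while expanding to atomic bitops via token
 * pasting, e.g.
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))	expands to
 *		test_bit(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags)
 *	tg3_flag_set(tp, TSO_CAPABLE);		expands to
 *		set_bit(TG3_FLAG_TSO_CAPABLE, (tp)->tg3_flags)
 */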
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		121
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"November 2, 2011"
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
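
/* Illustrative note (not part of the original file): because
 * TG3_TX_RING_SIZE is a power of two, the NEXT_TX() mask is exactly
 * the modulo mentioned in the comment above, i.e.
 *
 *	((N) + 1) % TG3_TX_RING_SIZE == ((N) + 1) & (TG3_TX_RING_SIZE - 1)
 *
 * so NEXT_TX(511) == 512 & 511 == 0, wrapping the ring without a
 * hardware divide.
 */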
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	0
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX		4096

#define TG3_RAW_IP_ALIGN	2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
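
/* Background note (not part of the original file): each
 * PCI_DEVICE(vend, dev) entry above expands to a struct pci_device_id
 * of roughly
 *
 *	{ .vendor = (vend), .device = (dev),
 *	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID }
 *
 * and MODULE_DEVICE_TABLE(pci, ...) exports the table so that
 * udev/modprobe can autoload tg3 when a matching device appears.
 */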
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
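
/* Illustrative note (not part of the original file): tw32() is a plain
 * posted write, while tw32_f() reads the register back so the write is
 * flushed to the chip before execution continues, e.g.
 *
 *	tw32_f(MAC_MODE, tp->mac_mode);
 *	udelay(40);
 *
 * is the common pattern (used later in this file) when the hardware
 * must see the write land before a delay is measured.
 */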
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);

		ret = -EBUSY;
	}

	return ret;
}
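
/* Illustrative usage (not part of the original file): callers bracket
 * shared-resource access with the lock/unlock pair, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;
 *	... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * as tg3_ape_send_event() below does.
 */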
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
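
/* Illustrative usage (not part of the original file): the pair above
 * implements the Clause 45 indirect access sequence over the Clause 22
 * MMD control/address registers; e.g. tg3_phy_eee_adjust() later in
 * this file reads the EEE resolution status with
 *
 *	tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val);
 */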
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
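
/* Worked example (not part of the original file): with the full
 * TG3_FW_EVENT_TIMEOUT_USEC of 2500 usec remaining, delay_cnt becomes
 * (2500 >> 3) + 1 = 313 iterations, and at 8 usec per udelay() the
 * loop above polls for roughly 2.5 ms before giving up.
 */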
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
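
/* Worked example (not part of the original file): if we advertise
 * symmetric pause only (lcladv = ADVERTISE_1000XPAUSE) and the link
 * partner advertises LPA_1000XPAUSE, the resolution above yields
 * cap = FLOW_CTRL_TX | FLOW_CTRL_RX, i.e. pause frames flow in both
 * directions; when only the asymmetric bits match up, a single
 * direction is enabled.
 */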
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthrough */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
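
/* Illustrative companion to tg3_set_function_status(): each PCI
 * function owns one 4-bit slot in the shared GPIO message word, with
 * only the low two bits of each slot defined by TG3_GPIO_MSG_MASK.
 * The helper below is hypothetical (not part of the driver) and simply
 * shows how a single function's slot would be read back out of the
 * already-downshifted value that tg3_set_function_status() returns.
 */
static inline u32 tg3_get_function_status(u32 status, u32 pci_fn)
{
	/* status here is the return value of tg3_set_function_status(),
	 * i.e. already shifted down by TG3_APE_GPIO_MSG_SHIFT.
	 */
	return (status >> (4 * pci_fn)) & TG3_GPIO_MSG_MASK;
}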
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static int tg3_setup_phy(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
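
/* Usage sketch: a single-word read (as done by tg3_nvram_read() below)
 * programs the address register and then executes one command word
 * that marks the access as both the first and last of the burst:
 *
 *	tw32(NVRAM_ADDR, offset);
 *	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *				 NVRAM_CMD_FIRST | NVRAM_CMD_LAST |
 *				 NVRAM_CMD_DONE);
 */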
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
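
/* Worked example (illustrative; assumes the Atmel AT45DB0x1B's usual
 * 264-byte page size and an ATMEL_AT45DB0X1B_PAGE_POS of 9): linear
 * offset 1000 falls in page 3 (1000 / 264) at byte 208 (1000 % 264),
 * so the physical address becomes (3 << 9) + 208 = 1744.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */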
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);

	if (!res)
		*val = cpu_to_be32(v);

	return res;
}
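
/* Usage sketch (hypothetical caller): because tg3_nvram_read_be32()
 * returns bytestream order, a block of NVRAM can be accumulated into a
 * buffer and consumed byte-by-byte regardless of host endianness:
 *
 *	__be32 buf[4];
 *	int i, err = 0;
 *
 *	for (i = 0; i < 4; i++)
 *		err |= tg3_nvram_read_be32(tp, base + 4 * i, &buf[i]);
 */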
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i == 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};
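
/* Layout of the firmware blobs consumed below (restating the comments
 * in tg3_load_5701_a0_firmware_fix() and tg3_load_tso_firmware()):
 * three big-endian header words, then the image proper.
 *
 *	fw_data[0]	version numbers
 *	fw_data[1]	start address (becomes fw_base)
 *	fw_data[2]	length (end_address_of_bss - start_address_of_text)
 *	fw_data[3..]	blob, loaded contiguously from fw_base
 *
 * This is why both loaders set fw_len to tp->fw->size minus the
 * 12-byte header and point fw_data at &fw_data[3].
 */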
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i == 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i == 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
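
/* Worked example (illustrative only): for MAC 00:10:18:aa:bb:cc the
 * loop above programs
 *
 *	MAC_ADDR_0_HIGH = 0x00000010	(bytes 0-1)
 *	MAC_ADDR_0_LOW  = 0x18aabbcc	(bytes 2-5)
 *
 * into all four address slots, and the TX backoff seed becomes
 * (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) & TX_BACKOFF_SEED_MASK.
 */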
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
					 u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= ADVERTISE_1000HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000FULL;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & ADVERTISE_ALL) != all_mask)
		return 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return 0;

		tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		if (tg3_ctrl != all_mask)
			return 0;
	}
	return 1;
}
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tg3_flag(tp, PAUSE_AUTONEG))
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
							      &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) +
					      PCI_EXP_LNKCTL, newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
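
/* Rough shape of the state machine driven by tg3_fiber_aneg_smachine()
 * below (following IEEE 802.3 clause 37 autonegotiation):
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART
 *	  -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *	  -> ACK_DETECT_INIT -> ACK_DETECT
 *	  -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *	  -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * Losing the partner's config words at most points drops the machine
 * back to AN_ENABLE; the NEXT_PAGE_WAIT states are declared above but
 * left unimplemented in the handler.
 */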
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
					     BMCR_ANRESTART |
					     BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	/* Bias dst by the register offset so the block lands at its
	 * natural position within the dump buffer.
	 */
	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
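/*
 * Usage sketch for tg3_rd32_loop() (illustrative; the same calls appear
 * in tg3_dump_legacy_regs() below):
 *
 *	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
 *
 * fills regs[MAC_MODE/4] .. regs[(MAC_MODE + 0x4ec)/4] with
 * tr32(MAC_MODE) .. tr32(MAC_MODE + 0x4ec), so a register's dump slot
 * can always be found from its hardware offset.
 */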
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * (in tg3_reset_task).
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
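/*
 * Worked example of the computation above (illustrative): with
 * TG3_TX_RING_SIZE == 512, tx_prod == 5 and tx_cons == 510, the ring
 * holds (5 - 510) & 511 == 7 in-flight descriptors, so tg3_tx_avail()
 * returns tx_pending - 7.  The mask makes the subtraction come out
 * right even after tx_prod has wrapped past the end of the ring.
 */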
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
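/*
 * The wake logic above pairs with the stop logic in tg3_start_xmit():
 * the producer stops the queue and then re-checks tg3_tx_avail() after
 * an smp_mb(), while the consumer here updates tx_cons and then checks
 * netif_tx_queue_stopped() after its own smp_mb().  Re-checking under
 * __netif_tx_lock() closes the remaining window where both sides could
 * otherwise each conclude that the other will perform the wakeup.
 */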
static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->skb)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(ri->skb);
	ri->skb = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, TG3_RX_OFFSET(tp));

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
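/*
 * Illustrative data flow for the scheme described above (editorial
 * sketch, not from the original source):
 *
 *   host --posts buffers--> std/jumbo producer rings --> chip
 *   chip --status+length--> return (status) ring      --> host
 *
 * The host only ever writes the producer rings and only reads the
 * return ring, so every ring cache line has a single writer.
 */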
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
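/*
 * Editorial note on the copy threshold used above: packets no larger
 * than TG3_RX_COPY_THRESH(tp) are copied into a small freshly allocated
 * skb and the original DMA buffer is recycled via tg3_recycle_rx(),
 * while larger packets are handed up directly and replaced with a new
 * buffer from tg3_alloc_rx_skb().  This trades one memcpy on small
 * frames for less large-buffer churn on big ones.
 */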
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
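/*
 * Editorial note: with RSS enabled, each interrupt vector recycles
 * consumed buffers into its own shadow producer ring.  The routine
 * above drains those shadow rings (spr) back into the single ring the
 * hardware actually reads, owned by napi[0] (dpr), stopping early with
 * -ENOSPC when a destination slot is still occupied; the caller in
 * tg3_poll_work() then writes the refreshed producer mailboxes.
 */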
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
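/*
 * Because test_and_set_bit() is atomic, at most one reset task is ever
 * queued at a time: a second tg3_reset_task_schedule() while the flag
 * is set is a no-op.  tg3_reset_task_cancel() both flushes any queued
 * task and clears the flag so a later schedule can proceed.
 */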
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
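/*
 * The test relies on 32-bit wraparound.  For example (illustrative):
 * base = 0xffffff00 and len = 0x200 gives base + len + 8 = 0x100000108,
 * which truncates to 0x108 < base, so the buffer crosses a 4GB boundary
 * and must be worked around.  The 0xffffdcc0 comparison is just a fast
 * filter for buffers that end safely below the boundary.
 */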
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > TG3_TX_BD_DMA_MAX && *budget) {
			u32 frag_len = TG3_TX_BD_DMA_MAX;
			len -= TG3_TX_BD_DMA_MAX;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += TG3_TX_BD_DMA_MAX / 2;
				frag_len = TG3_TX_BD_DMA_MAX / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
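/*
 * In the fallback above, the oversized-TSO skb is segmented in software
 * (skb_gso_segment() with TSO masked out of the feature flags) and each
 * resulting MTU-sized segment is re-submitted through tg3_start_xmit(),
 * bypassing the hardware TSO engine entirely for that packet.
 */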
6634 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6635 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6637 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
6639 struct tg3
*tp
= netdev_priv(dev
);
6640 u32 len
, entry
, base_flags
, mss
, vlan
= 0;
6642 int i
= -1, would_hit_hwbug
;
6644 struct tg3_napi
*tnapi
;
6645 struct netdev_queue
*txq
;
6648 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
6649 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
6650 if (tg3_flag(tp
, ENABLE_TSS
))
6653 budget
= tg3_tx_avail(tnapi
);
6655 /* We are running in BH disabled context with netif_tx_lock
6656 * and TX reclaim runs via tp->napi.poll inside of a software
6657 * interrupt. Furthermore, IRQ processing runs lockless so we have
6658 * no IRQ context deadlocks to worry about either. Rejoice!
6660 if (unlikely(budget
<= (skb_shinfo(skb
)->nr_frags
+ 1))) {
6661 if (!netif_tx_queue_stopped(txq
)) {
6662 netif_tx_stop_queue(txq
);
6664 /* This is a hard error, log it. */
6666 "BUG! Tx Ring full when queue awake!\n");
6668 return NETDEV_TX_BUSY
;
6671 entry
= tnapi
->tx_prod
;
6673 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
6674 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
6676 mss
= skb_shinfo(skb
)->gso_size
;
6679 u32 tcp_opt_len
, hdr_len
;
6681 if (skb_header_cloned(skb
) &&
6682 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
))
6686 tcp_opt_len
= tcp_optlen(skb
);
6688 if (skb_is_gso_v6(skb
)) {
6689 hdr_len
= skb_headlen(skb
) - ETH_HLEN
;
6693 ip_tcp_len
= ip_hdrlen(skb
) + sizeof(struct tcphdr
);
6694 hdr_len
= ip_tcp_len
+ tcp_opt_len
;
6697 iph
->tot_len
= htons(mss
+ hdr_len
);
6700 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
6701 tg3_flag(tp
, TSO_BUG
))
6702 return tg3_tso_bug(tp
, skb
);
6704 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
6705 TXD_FLAG_CPU_POST_DMA
);
6707 if (tg3_flag(tp
, HW_TSO_1
) ||
6708 tg3_flag(tp
, HW_TSO_2
) ||
6709 tg3_flag(tp
, HW_TSO_3
)) {
6710 tcp_hdr(skb
)->check
= 0;
6711 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
6713 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
6718 if (tg3_flag(tp
, HW_TSO_3
)) {
6719 mss
|= (hdr_len
& 0xc) << 12;
6721 base_flags
|= 0x00000010;
6722 base_flags
|= (hdr_len
& 0x3e0) << 5;
6723 } else if (tg3_flag(tp
, HW_TSO_2
))
6724 mss
|= hdr_len
<< 9;
6725 else if (tg3_flag(tp
, HW_TSO_1
) ||
6726 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
6727 if (tcp_opt_len
|| iph
->ihl
> 5) {
6730 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
6731 mss
|= (tsflags
<< 11);
6734 if (tcp_opt_len
|| iph
->ihl
> 5) {
6737 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
6738 base_flags
|= tsflags
<< 12;
	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;

	/* Now loop through additional data fragments, and queue them. */
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
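/* The queueing logic above is a hysteresis: the queue stops once fewer
 * than MAX_SKB_FRAGS + 1 descriptors remain (the worst case for one more
 * packet) and is only re-woken after tg3_tx() reclaim pushes the free
 * count past TG3_TX_WAKEUP_THRESH(tnapi), so the queue does not ping-pong
 * at the ring-full boundary.
 */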
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
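/* For reference, the BMCR value composed above for internal loopback at
 * 1000 Mb/s full duplex is BMCR_FULLDPLX | BMCR_SPEED1000 | BMCR_LOOPBACK,
 * i.e. 0x0100 | 0x0040 | 0x4000 = 0x4140; at 100 Mb/s, BMCR_SPEED100
 * (0x2000) is used instead of BMCR_SPEED1000.
 */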
static void tg3_set_loopback(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static u32 tg3_fix_features(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static int tg3_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
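/* The ring masks above give cheap modular arithmetic because the producer
 * rings are power-of-two sized: with the classic 512-entry standard ring,
 * rx_std_ring_mask is 511 and (511 + 1) & 511 wraps the index to 0
 * without a divide.
 */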
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
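/* Note the split in DMA strategies: the descriptor rings, which host and
 * NIC both access continuously, come from dma_alloc_coherent() above,
 * while the packet buffers themselves use per-packet streaming mappings
 * (pci_map_single()/skb_frag_dma_map()) set up in the rx/tx paths.
 */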
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					       sizeof(struct tg3_tx_ring_info) *
					       TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this work reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
*);
7996 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
8000 if (!tg3_flag(tp
, ENABLE_TSS
)) {
8001 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
8002 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
8003 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
8005 tw32(HOSTCC_TXCOL_TICKS
, 0);
8006 tw32(HOSTCC_TXMAX_FRAMES
, 0);
8007 tw32(HOSTCC_TXCOAL_MAXF_INT
, 0);
8010 if (!tg3_flag(tp
, ENABLE_RSS
)) {
8011 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
8012 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
8013 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
8015 tw32(HOSTCC_RXCOL_TICKS
, 0);
8016 tw32(HOSTCC_RXMAX_FRAMES
, 0);
8017 tw32(HOSTCC_RXCOAL_MAXF_INT
, 0);
8020 if (!tg3_flag(tp
, 5705_PLUS
)) {
8021 u32 val
= ec
->stats_block_coalesce_usecs
;
8023 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
8024 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
8026 if (!netif_carrier_ok(tp
->dev
))
8029 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
8032 for (i
= 0; i
< tp
->irq_cnt
- 1; i
++) {
8035 reg
= HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18;
8036 tw32(reg
, ec
->rx_coalesce_usecs
);
8037 reg
= HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18;
8038 tw32(reg
, ec
->rx_max_coalesced_frames
);
8039 reg
= HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
8040 tw32(reg
, ec
->rx_max_coalesced_frames_irq
);
8042 if (tg3_flag(tp
, ENABLE_TSS
)) {
8043 reg
= HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18;
8044 tw32(reg
, ec
->tx_coalesce_usecs
);
8045 reg
= HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18;
8046 tw32(reg
, ec
->tx_max_coalesced_frames
);
8047 reg
= HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
8048 tw32(reg
, ec
->tx_max_coalesced_frames_irq
);
8052 for (; i
< tp
->irq_max
- 1; i
++) {
8053 tw32(HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18, 0);
8054 tw32(HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
8055 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
8057 if (tg3_flag(tp
, ENABLE_TSS
)) {
8058 tw32(HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18, 0);
8059 tw32(HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
8060 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
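/* Each extra MSI-X vector owns a block of host coalescing registers
 * spaced 0x18 bytes apart, which is why every per-vector register above
 * is addressed as <reg>_VEC1 + i * 0x18 (vector 1 corresponds to i == 0).
 */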
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
	else
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
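/* Replenish threshold arithmetic above, by way of example: with
 * rx_pending == 64, the host-side threshold is max(64 / 8, 1) = 8, and
 * the value actually programmed is the smaller of that and half the
 * NIC's on-chip BD cache, so neither side can starve the other.
 */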
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}
	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescaler register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
				val = TG3_RX_STD_MAX_SIZE_5700;
			else
				val = TG3_RX_STD_MAX_SIZE_5717;
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);
	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}
	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
8872 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
8873 if (!tg3_flag(tp
, 5705_PLUS
))
8874 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
8876 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
8878 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
8880 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
8882 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
8883 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
8884 val
= RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
;
8885 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
8886 val
|= RCVDBDI_MODE_LRG_RING_SZ
;
8887 tw32(RCVDBDI_MODE
, val
);
8888 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
8889 if (tg3_flag(tp
, HW_TSO_1
) ||
8890 tg3_flag(tp
, HW_TSO_2
) ||
8891 tg3_flag(tp
, HW_TSO_3
))
8892 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
8893 val
= SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
;
8894 if (tg3_flag(tp
, ENABLE_TSS
))
8895 val
|= SNDBDI_MODE_MULTI_TXQ_EN
;
8896 tw32(SNDBDI_MODE
, val
);
8897 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
8899 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
8900 err
= tg3_load_5701_a0_firmware_fix(tp
);
8905 if (tg3_flag(tp
, TSO_CAPABLE
)) {
8906 err
= tg3_load_tso_firmware(tp
);
8911 tp
->tx_mode
= TX_MODE_ENABLE
;
8913 if (tg3_flag(tp
, 5755_PLUS
) ||
8914 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
8915 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
8917 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8918 val
= TX_MODE_JMB_FRM_LEN
| TX_MODE_CNT_DN_MODE
;
8919 tp
->tx_mode
&= ~val
;
8920 tp
->tx_mode
|= tr32(MAC_TX_MODE
) & val
;
8923 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
8926 if (tg3_flag(tp
, ENABLE_RSS
)) {
8928 u32 reg
= MAC_RSS_INDIR_TBL_0
;
8930 if (tp
->irq_cnt
== 2) {
8931 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
+= 8) {
8938 while (i
< TG3_RSS_INDIR_TBL_SIZE
) {
8939 val
= i
% (tp
->irq_cnt
- 1);
8941 for (; i
% 8; i
++) {
8943 val
|= (i
% (tp
->irq_cnt
- 1));
8950 /* Setup the "secret" hash key. */
8951 tw32(MAC_RSS_HASH_KEY_0
, 0x5f865437);
8952 tw32(MAC_RSS_HASH_KEY_1
, 0xe4ac62cc);
8953 tw32(MAC_RSS_HASH_KEY_2
, 0x50103a45);
8954 tw32(MAC_RSS_HASH_KEY_3
, 0x36621985);
8955 tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
8956 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
8957 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
8958 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
8959 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
8960 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
8963 tp
->rx_mode
= RX_MODE_ENABLE
;
8964 if (tg3_flag(tp
, 5755_PLUS
))
8965 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
8967 if (tg3_flag(tp
, ENABLE_RSS
))
8968 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
8969 RX_MODE_RSS_ITBL_HASH_BITS_7
|
8970 RX_MODE_RSS_IPV6_HASH_EN
|
8971 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
8972 RX_MODE_RSS_IPV4_HASH_EN
|
8973 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
8975 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8978 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
8980 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
8981 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8982 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8985 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8988 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8989 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
8990 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
8991 /* Set drive transmission level to 1.2V */
8992 /* only if the signal pre-emphasis bit is not set */
8993 val
= tr32(MAC_SERDES_CFG
);
8996 tw32(MAC_SERDES_CFG
, val
);
8998 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
8999 tw32(MAC_SERDES_CFG
, 0x616000);
9002 /* Prevent chip from dropping frames when flow control
9005 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
9009 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
9011 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
9012 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
9013 /* Use hardware link auto-negotiation */
9014 tg3_flag_set(tp
, HW_AUTONEG
);
9017 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
9018 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
9021 tmp
= tr32(SERDES_RX_CTRL
);
9022 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
9023 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
9024 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
9025 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9028 if (!tg3_flag(tp
, USE_PHYLIB
)) {
9029 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
9030 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
9031 tp
->link_config
.speed
= tp
->link_config
.orig_speed
;
9032 tp
->link_config
.duplex
= tp
->link_config
.orig_duplex
;
9033 tp
->link_config
.autoneg
= tp
->link_config
.orig_autoneg
;
9036 err
= tg3_setup_phy(tp
, 0);
9040 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9041 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
9044 /* Clear CRC stats. */
9045 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
9046 tg3_writephy(tp
, MII_TG3_TEST1
,
9047 tmp
| MII_TG3_TEST1_CRC_EN
);
9048 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
9053 __tg3_set_rx_mode(tp
->dev
);
9055 /* Initialize receive rules. */
9056 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
9057 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9058 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
9059 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9061 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
))
9065 if (tg3_flag(tp
, ENABLE_ASF
))
9069 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
9071 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
9073 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
9075 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
9077 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
9079 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
9081 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
9083 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
9085 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
9087 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
9089 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
9091 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
9093 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9095 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9103 if (tg3_flag(tp
, ENABLE_APE
))
9104 /* Write our heartbeat update interval to APE. */
9105 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
9106 APE_HOST_HEARTBEAT_INT_DISABLE
);
9108 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
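
/* TG3_STAT_ADD32 folds a 32-bit hardware counter into the 64-bit
 * {high, low} pair used by the stats blocks: the unsigned add is
 * allowed to wrap, and "low < __val" detects the wrap so a carry can
 * be propagated into 'high'. Illustrative values only: low of
 * 0xfffffff0 plus a register read of 0x20 leaves low at 0x10, which is
 * below 0x20, so 'high' gains one.
 */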
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
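
/* For reference, the layout tg3_request_firmware() assumes for the blob
 * (names are descriptive only; the code indexes raw __be32 words):
 *
 *	fw_data[0]	version
 *	fw_data[1]	start address for the on-chip CPU
 *	fw_data[2]	full image length, data plus BSS
 *	fw_data[3...]	fw->size - 12 bytes of text/data
 *
 * hence the sanity check that the advertised length (with BSS) is at
 * least the amount of payload actually present in the file.
 */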
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
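
/* Vector layout assumed by tg3_enable_msix(): vector 0 takes link and
 * other non-ring events, vectors 1..N each serve one rx ring, so a
 * machine with 4 online cpus requests min(4 + 1, tp->irq_max) vectors.
 * When the PCI core grants fewer vectors than asked for, the second
 * pci_enable_msix() call retries with exactly the granted count rather
 * than failing outright.
 */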
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
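
/* The net effect of tg3_ints_init() is a fallback ladder: MSI-X with
 * one rx ring per cpu where supported, then single-vector MSI, then
 * legacy INTx via the defcfg path (which also catches MSI-X chips whose
 * vector allocation failed). Only the MSI-X case leaves irq_cnt > 1.
 */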
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);

		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);

	return err;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
						 struct rtnl_link_stats64 *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
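
/* get_stat64() simply recombines the pair maintained by TG3_STAT_ADD32;
 * e.g. high of 0x1 and low of 0x10 yield 0x100000010.
 */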
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)
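
/* ESTAT_ADD() relies on tg3_ethtool_stats, the saved snapshot and
 * tg3_hw_stats all declaring a counter under the same member name, so a
 * single token names all three fields. ESTAT_ADD(rx_octets) below, for
 * instance, expands to:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 */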
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;

	return stats;
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
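
/* calc_crc() is the standard bit-reflected CRC-32 used for the Ethernet
 * FCS: all-ones initial value, polynomial 0xedb88320 applied bit by
 * bit, result inverted. __tg3_set_rx_mode() below feeds each multicast
 * address through it and hashes on the low 7 bits of the inverted
 * result.
 */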
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
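
/* The multicast filter above is a 128-bit hash table: the low 7 bits of
 * the inverted CRC pick one of 128 bits, bits 6:5 (mask 0x60) select
 * the MAC_HASH_REG_n register and bits 4:0 the bit inside it. A hash of
 * 0x43, for example, lands in register 2, bit 3.
 */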
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* The first read flushes any latched value; only the
		 * second read is trusted.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
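
/* The lower bound on tx_pending enforced above keeps room in the tx
 * ring for at least one maximally fragmented packet, which can consume
 * MAX_SKB_FRAGS + 1 descriptors. The stricter 3x margin on TSO_BUG
 * chips is presumably headroom for the driver's software resegmentation
 * of TSO packets, which can temporarily need far more descriptors.
 */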
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			int l;
			u8 msk;

			if ((i == 0) || (i == 8)) {
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 24) {
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else {
				data[j++] = buf8[i];
			}
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
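/* The three patterns used above are the usual memory-test values: all
 * bits clear, all bits set, and 0xaa55a55a, whose alternating bit and
 * nibble pattern helps catch shorted or stuck adjacent data lines.
 */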
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
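/* For reference, tg3_tso_header is a 0x0800 (IPv4) ethertype followed
 * by a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP) and a
 * 32-byte TCP header (20 fixed bytes plus a 12-byte options area:
 * NOP, NOP, timestamp).  The IP total-length field is left zero and is
 * filled in by tg3_run_loopback(); on hardware-TSO chips the TCP
 * checksum field is zeroed there as well.
 */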
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
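/* Every payload byte transmitted by tg3_run_loopback() is its own
 * offset modulo 256 ((u8)(i & 0xff)), so the receive loop above can
 * verify both the content and the ordering of the looped-back data
 * without keeping a copy of the transmitted frame.
 */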
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)

static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			msleep(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
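/* tg3_self_test() below fills the ethtool result array as follows:
 * data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 * data[4..6] the three loopback words written by tg3_test_loopback()
 * (MAC, PHY and external-PHY loopback), and data[7] interrupt.
 */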
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
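/* Note that tg3_set_coalesce() only caches the validated values in
 * tp->coal; when the interface is running, __tg3_set_coalesce() (called
 * above under the full lock) writes them out to the HOSTCC_* host
 * coalescing registers.
 */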
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
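/* Worked example for the sizing loop above (assumed part, for
 * illustration only): on a 128 KB device with the signature at offset
 * 0, the reads at 0x10, 0x20, 0x40, ... all miss until cursize reaches
 * 0x20000; there the address wraps back to offset 0, the magic value is
 * seen again, and nvram_size becomes 0x20000.
 */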
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
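/* Example of the decode above (assumed contents): if the NVRAM stores a
 * size of 512 KB, the 16-bit field reads back byte-swapped, e.g. as
 * 0x0002; swab16(0x0002) recovers 0x0200 (512), and 512 * 1024 yields
 * the size in bytes.
 */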
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
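/* Endianness illustration for the write path above (assumed value): a
 * caller passing bytes { 0x12, 0x34, 0x56, 0x78 } makes data the
 * big-endian value 0x12345678, and swab32(be32_to_cpu(data)) hands the
 * SEEPROM interface 0x78563412 -- exactly reversing the transformation
 * that tg3_nvram_read_be32() applies on the read path.
 */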
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
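/* The loop above is a page-wise read-modify-write cycle: unbuffered
 * flash can only be erased a page at a time, so each iteration reads
 * the whole page into tmp, merges the caller's bytes at page_off,
 * erases the page and rewrites it in full, bracketed by the WREN
 * (write-enable) commands the part requires before erase and program
 * operations.
 */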
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
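
/* Worked example of the merge above (illustrative values, not from any
 * particular board): if the MAGIC1 read returns thalf_otp = 0x1234abcd
 * and the MAGIC2 read returns bhalf_otp = 0x5678ffff, the merged gphy
 * config is (0xabcd << 16) | (0x5678ffff >> 16) = 0xabcd5678.
 */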
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg |
		  ADVERTISED_Pause;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
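
/* Note on the PHY ID assembly in tg3_phy_probe() below: the two MII ID
 * registers are packed into tg3's internal format rather than the plain
 * (PHYSID1 << 16) | PHYSID2 layout.  With illustrative reads of
 * MII_PHYSID1 = 0x0020 and MII_PHYSID2 = 0x60b0, the driver computes
 * (0x0020 << 10) | ((0x60b0 & 0xfc00) << 16) | (0x60b0 & 0x03ff) =
 * 0x600080b0, which is then masked with TG3_PHY_ID_MASK before the
 * known-ID table lookup.
 */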
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
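
/* An image is treated as valid above only when the top six bits of its
 * first word carry the 0x0c000000 header signature, the second word has
 * its top three bits clear, and both NVRAM reads complete without error.
 */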
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
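
/* Build numbers are rendered as a trailing letter: build 1 becomes 'a',
 * build 2 becomes 'b', and so on up to build 26 ('z'), which is why the
 * sanity check above rejects build > 26.  E.g. major 1, minor 4, build 2
 * appends " v1.04b" to tp->fw_ver.
 */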
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
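
/* tp->fw_ver is thus built up in stages: tg3_read_vpd() may seed it with
 * the VPD vendor string, one of the bc/sb/hwsb readers then appends the
 * bootcode version, and the management firmware (ASF) or DASH/NCSI (APE)
 * version is appended only when no VPD version was present.  The final
 * NUL store guarantees termination regardless of how the pieces landed.
 */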
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tg3_flag_set(tp, 5780_CLASS);
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);

	/* Determine TSO capabilities */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
		tg3_flag_set(tp, TSO_CAPABLE);
	else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	if (tg3_flag(tp, 5755_PLUS))
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_flag_set(tp, 4K_FIFO_LIMIT);

	if (tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		tp->pcie_readrq = 4096;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tp->pcie_readrq = 2048;

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
			tp->pci_fn = tp->pci_fn ? 1 : 0;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
		}
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, CPMU_PRESENT);

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
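
/* The 0x484b signature checked above is ASCII "HK" in the top half of the
 * MAC address mailbox word; it appears to serve as a validity marker that
 * bootcode stores before the driver trusts the SRAM-provided address.
 */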
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
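
/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the "* 4" above:
 * a config-space value of 0x10 means a 64-byte cache line.  A value of 0
 * (cache line size unset) is treated as the worst case, 1024 bytes.
 */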
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size,
				     int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
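
/* Note that tg3_do_test_dma() programs the test descriptor into NIC SRAM
 * through the PCI config-space memory window (TG3PCI_MEM_WIN_BASE_ADDR /
 * TG3PCI_MEM_WIN_DATA) rather than MMIO, then kicks the engine by queueing
 * the descriptor address on the appropriate DMA-high FTQ and polling the
 * matching completion FIFO for up to 40 iterations.
 */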
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
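
/* Added note, hedged: the watermarks chosen above are only cached here.
 * They are assumed to be programmed into the chip later, at hardware
 * reset time, with writes of the form:
 *
 *	tw32(BUFMGR_MB_RDMA_LOW_WATER,
 *	     tp->bufmgr_config.mbuf_read_dma_low_water);
 *	tw32(BUFMGR_MB_MACRX_LOW_WATER,
 *	     tp->bufmgr_config.mbuf_mac_rx_low_water);
 *	tw32(BUFMGR_MB_HIGH_WATER, tp->bufmgr_config.mbuf_high_water);
 */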
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
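
/* Added note: pci_get_slot() returns its device with an elevated
 * reference count, which is why each loop iteration above that does not
 * break out drops the reference with pci_dev_put(), and why a matching
 * put is issued before the peer is returned.
 */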
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
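
/* Added note, hedged: these defaults seed tp->coal, which is assumed to
 * be what the ETHTOOL_GCOALESCE path reports before the user tunes
 * coalescing, e.g. from userspace:
 *
 *	ethtool -c eth0                         (read back these values)
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 (override them)
 */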
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
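
/* Added note: this ops table is attached in tg3_init_one() via
 * dev->netdev_ops = &tg3_netdev_ops, so e.g. bringing the interface up
 * ("ip link set eth0 up") reaches tg3_open() through .ndo_open.
 */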
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);
	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}
	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
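
	/* Added note, hedged: dma_mask governs streaming (per-packet)
	 * mappings while persist_dma_mask governs coherent allocations
	 * such as the rings and status block.  On CONFIG_HIGHMEM systems
	 * the 40-bit-bug chips still advertise a 64-bit streaming mask
	 * because, per the comment above, tg3_start_xmit() is expected
	 * to catch buffers whose bus address crosses the 40-bit limit.
	 */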
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown it.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);
	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
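
/* Added note: the error labels above unwind strictly in reverse order
 * of acquisition (APE BAR, register BAR, netdev, power state, regions,
 * PCI enable), the usual goto-ladder pattern for kernel probe
 * functions.
 */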
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
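
/* Added note, hedged: SIMPLE_DEV_PM_OPS expands to a struct dev_pm_ops
 * that wires tg3_suspend/tg3_resume into all of the system sleep
 * callbacks, roughly:
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		.suspend  = tg3_suspend,
 *		.resume   = tg3_resume,
 *		.freeze   = tg3_suspend,
 *		.thaw     = tg3_resume,
 *		.poweroff = tg3_suspend,
 *		.restore  = tg3_resume,
 *	};
 */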
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
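
/* Added note: on a PCI (AER) error the core is expected to walk these
 * handlers in order: .error_detected quiesces the device, .slot_reset
 * reinitializes it after the bus reset, and .resume restarts traffic.
 */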
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);