2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
51 #include <asm/system.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
57 #include <asm/idprom.h>
66 /* Functions & macros to verify TG3_FLAGS types */
68 static inline int _tg3_flag(enum TG3_FLAGS flag
, unsigned long *bits
)
70 return test_bit(flag
, bits
);
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag
, unsigned long *bits
)
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag
, unsigned long *bits
)
80 clear_bit(flag
, bits
);
83 #define tg3_flag(tp, flag) \
84 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag) \
86 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag) \
88 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define DRV_MODULE_NAME "tg3"
92 #define TG3_MIN_NUM 119
93 #define DRV_MODULE_VERSION \
94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE "May 18, 2011"
97 #define TG3_DEF_MAC_MODE 0
98 #define TG3_DEF_RX_MODE 0
99 #define TG3_DEF_TX_MODE 0
100 #define TG3_DEF_MSG_ENABLE \
110 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
112 /* length of time before we decide the hardware is borked,
113 * and dev->tx_timeout() should be called to fix the problem
116 #define TG3_TX_TIMEOUT (5 * HZ)
118 /* hardware minimum and maximum for a single frame's data payload */
119 #define TG3_MIN_MTU 60
120 #define TG3_MAX_MTU(tp) \
121 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
123 /* These numbers seem to be hard coded in the NIC firmware somehow.
124 * You can't change the ring sizes, but you can change where you place
125 * them in the NIC onboard memory.
127 #define TG3_RX_STD_RING_SIZE(tp) \
128 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
129 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
130 #define TG3_DEF_RX_RING_PENDING 200
131 #define TG3_RX_JMB_RING_SIZE(tp) \
132 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
133 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
134 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
135 #define TG3_RSS_INDIR_TBL_SIZE 128
137 /* Do not place this n-ring entries value into the tp struct itself,
138 * we really want to expose these constants to GCC so that modulo et
139 * al. operations are done with shifts and masks instead of with
140 * hw multiply/modulo instructions. Another solution would be to
141 * replace things like '% foo' with '& (foo - 1)'.
144 #define TG3_TX_RING_SIZE 512
145 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
147 #define TG3_RX_STD_RING_BYTES(tp) \
148 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
149 #define TG3_RX_JMB_RING_BYTES(tp) \
150 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
151 #define TG3_RX_RCB_RING_BYTES(tp) \
152 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
153 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
155 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
157 #define TG3_DMA_BYTE_ENAB 64
159 #define TG3_RX_STD_DMA_SZ 1536
160 #define TG3_RX_JMB_DMA_SZ 9046
162 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
164 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
165 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
167 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
168 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
170 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
171 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
173 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
174 * that are at least dword aligned when used in PCIX mode. The driver
175 * works around this bug by double copying the packet. This workaround
176 * is built into the normal double copy length check for efficiency.
178 * However, the double copy is only necessary on those architectures
179 * where unaligned memory accesses are inefficient. For those architectures
180 * where unaligned memory accesses incur little penalty, we can reintegrate
181 * the 5701 in the normal rx path. Doing so saves a device structure
182 * dereference by hardcoding the double copy threshold in place.
184 #define TG3_RX_COPY_THRESHOLD 256
185 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
186 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
188 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
191 /* minimum number of free TX descriptors required to wake up TX process */
192 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
193 #define TG3_TX_BD_DMA_MAX 4096
195 #define TG3_RAW_IP_ALIGN 2
197 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
199 #define FIRMWARE_TG3 "tigon/tg3.bin"
200 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
201 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
203 static char version
[] __devinitdata
=
204 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")";
206 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
207 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
208 MODULE_LICENSE("GPL");
209 MODULE_VERSION(DRV_MODULE_VERSION
);
210 MODULE_FIRMWARE(FIRMWARE_TG3
);
211 MODULE_FIRMWARE(FIRMWARE_TG3TSO
);
212 MODULE_FIRMWARE(FIRMWARE_TG3TSO5
);
214 static int tg3_debug
= -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
215 module_param(tg3_debug
, int, 0);
216 MODULE_PARM_DESC(tg3_debug
, "Tigon3 bitmapped debugging message enable value");
218 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl
) = {
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5700
)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5701
)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702
)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703
)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704
)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702FE
)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705
)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705_2
)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M
)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M_2
)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702X
)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703X
)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S
)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702A3
)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703A3
)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5782
)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5788
)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5789
)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901
)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901_2
)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S_2
)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705F
)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5721
)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5722
)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751
)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751M
)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751F
)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752
)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752M
)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753
)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753M
)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753F
)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754
)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754M
)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755
)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755M
)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5756
)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5786
)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787
)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787M
)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787F
)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714
)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714S
)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715
)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715S
)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780
)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780S
)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5781
)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906
)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906M
)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5784
)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5764
)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5723
)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761
)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761E
)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5761S
)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5761SE
)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5785_G
)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5785_F
)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57780
)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57760
)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57790
)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57788
)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5717
)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5718
)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57781
)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57785
)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57761
)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57765
)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57791
)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57795
)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5719
)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5720
)},
292 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9DXX
)},
293 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9MXX
)},
294 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1000
)},
295 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1001
)},
296 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1003
)},
297 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC9100
)},
298 {PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_TIGON3
)},
299 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
303 MODULE_DEVICE_TABLE(pci
, tg3_pci_tbl
);
305 static const struct {
306 const char string
[ETH_GSTRING_LEN
];
307 } ethtool_stats_keys
[] = {
310 { "rx_ucast_packets" },
311 { "rx_mcast_packets" },
312 { "rx_bcast_packets" },
314 { "rx_align_errors" },
315 { "rx_xon_pause_rcvd" },
316 { "rx_xoff_pause_rcvd" },
317 { "rx_mac_ctrl_rcvd" },
318 { "rx_xoff_entered" },
319 { "rx_frame_too_long_errors" },
321 { "rx_undersize_packets" },
322 { "rx_in_length_errors" },
323 { "rx_out_length_errors" },
324 { "rx_64_or_less_octet_packets" },
325 { "rx_65_to_127_octet_packets" },
326 { "rx_128_to_255_octet_packets" },
327 { "rx_256_to_511_octet_packets" },
328 { "rx_512_to_1023_octet_packets" },
329 { "rx_1024_to_1522_octet_packets" },
330 { "rx_1523_to_2047_octet_packets" },
331 { "rx_2048_to_4095_octet_packets" },
332 { "rx_4096_to_8191_octet_packets" },
333 { "rx_8192_to_9022_octet_packets" },
340 { "tx_flow_control" },
342 { "tx_single_collisions" },
343 { "tx_mult_collisions" },
345 { "tx_excessive_collisions" },
346 { "tx_late_collisions" },
347 { "tx_collide_2times" },
348 { "tx_collide_3times" },
349 { "tx_collide_4times" },
350 { "tx_collide_5times" },
351 { "tx_collide_6times" },
352 { "tx_collide_7times" },
353 { "tx_collide_8times" },
354 { "tx_collide_9times" },
355 { "tx_collide_10times" },
356 { "tx_collide_11times" },
357 { "tx_collide_12times" },
358 { "tx_collide_13times" },
359 { "tx_collide_14times" },
360 { "tx_collide_15times" },
361 { "tx_ucast_packets" },
362 { "tx_mcast_packets" },
363 { "tx_bcast_packets" },
364 { "tx_carrier_sense_errors" },
368 { "dma_writeq_full" },
369 { "dma_write_prioq_full" },
373 { "rx_threshold_hit" },
375 { "dma_readq_full" },
376 { "dma_read_prioq_full" },
377 { "tx_comp_queue_full" },
379 { "ring_set_send_prod_index" },
380 { "ring_status_update" },
382 { "nic_avoided_irqs" },
383 { "nic_tx_threshold_hit" },
385 { "mbuf_lwm_thresh_hit" },
388 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
391 static const struct {
392 const char string
[ETH_GSTRING_LEN
];
393 } ethtool_test_keys
[] = {
394 { "nvram test (online) " },
395 { "link test (online) " },
396 { "register test (offline)" },
397 { "memory test (offline)" },
398 { "loopback test (offline)" },
399 { "interrupt test (offline)" },
402 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
405 static void tg3_write32(struct tg3
*tp
, u32 off
, u32 val
)
407 writel(val
, tp
->regs
+ off
);
410 static u32
tg3_read32(struct tg3
*tp
, u32 off
)
412 return readl(tp
->regs
+ off
);
415 static void tg3_ape_write32(struct tg3
*tp
, u32 off
, u32 val
)
417 writel(val
, tp
->aperegs
+ off
);
420 static u32
tg3_ape_read32(struct tg3
*tp
, u32 off
)
422 return readl(tp
->aperegs
+ off
);
425 static void tg3_write_indirect_reg32(struct tg3
*tp
, u32 off
, u32 val
)
429 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
430 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
431 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
432 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
435 static void tg3_write_flush_reg32(struct tg3
*tp
, u32 off
, u32 val
)
437 writel(val
, tp
->regs
+ off
);
438 readl(tp
->regs
+ off
);
441 static u32
tg3_read_indirect_reg32(struct tg3
*tp
, u32 off
)
446 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
447 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
448 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
449 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
453 static void tg3_write_indirect_mbox(struct tg3
*tp
, u32 off
, u32 val
)
457 if (off
== (MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
)) {
458 pci_write_config_dword(tp
->pdev
, TG3PCI_RCV_RET_RING_CON_IDX
+
459 TG3_64BIT_REG_LOW
, val
);
462 if (off
== TG3_RX_STD_PROD_IDX_REG
) {
463 pci_write_config_dword(tp
->pdev
, TG3PCI_STD_RING_PROD_IDX
+
464 TG3_64BIT_REG_LOW
, val
);
468 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
469 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
470 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
471 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
473 /* In indirect mode when disabling interrupts, we also need
474 * to clear the interrupt bit in the GRC local ctrl register.
476 if ((off
== (MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
)) &&
478 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_LOCAL_CTRL
,
479 tp
->grc_local_ctrl
|GRC_LCLCTRL_CLEARINT
);
483 static u32
tg3_read_indirect_mbox(struct tg3
*tp
, u32 off
)
488 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
489 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
490 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
491 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
495 /* usec_wait specifies the wait time in usec when writing to certain registers
496 * where it is unsafe to read back the register without some delay.
497 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
498 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
500 static void _tw32_flush(struct tg3
*tp
, u32 off
, u32 val
, u32 usec_wait
)
502 if (tg3_flag(tp
, PCIX_TARGET_HWBUG
) || tg3_flag(tp
, ICH_WORKAROUND
))
503 /* Non-posted methods */
504 tp
->write32(tp
, off
, val
);
507 tg3_write32(tp
, off
, val
);
512 /* Wait again after the read for the posted method to guarantee that
513 * the wait time is met.
519 static inline void tw32_mailbox_flush(struct tg3
*tp
, u32 off
, u32 val
)
521 tp
->write32_mbox(tp
, off
, val
);
522 if (!tg3_flag(tp
, MBOX_WRITE_REORDER
) && !tg3_flag(tp
, ICH_WORKAROUND
))
523 tp
->read32_mbox(tp
, off
);
526 static void tg3_write32_tx_mbox(struct tg3
*tp
, u32 off
, u32 val
)
528 void __iomem
*mbox
= tp
->regs
+ off
;
530 if (tg3_flag(tp
, TXD_MBOX_HWBUG
))
532 if (tg3_flag(tp
, MBOX_WRITE_REORDER
))
536 static u32
tg3_read32_mbox_5906(struct tg3
*tp
, u32 off
)
538 return readl(tp
->regs
+ off
+ GRCMBOX_BASE
);
541 static void tg3_write32_mbox_5906(struct tg3
*tp
, u32 off
, u32 val
)
543 writel(val
, tp
->regs
+ off
+ GRCMBOX_BASE
);
546 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
547 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
548 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
549 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
550 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
552 #define tw32(reg, val) tp->write32(tp, reg, val)
553 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
554 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
555 #define tr32(reg) tp->read32(tp, reg)
557 static void tg3_write_mem(struct tg3
*tp
, u32 off
, u32 val
)
561 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
&&
562 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
))
565 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
566 if (tg3_flag(tp
, SRAM_USE_CONFIG
)) {
567 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
568 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
570 /* Always leave this as zero. */
571 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
573 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
574 tw32_f(TG3PCI_MEM_WIN_DATA
, val
);
576 /* Always leave this as zero. */
577 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
579 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
582 static void tg3_read_mem(struct tg3
*tp
, u32 off
, u32
*val
)
586 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
&&
587 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
)) {
592 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
593 if (tg3_flag(tp
, SRAM_USE_CONFIG
)) {
594 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
595 pci_read_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
597 /* Always leave this as zero. */
598 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
600 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
601 *val
= tr32(TG3PCI_MEM_WIN_DATA
);
603 /* Always leave this as zero. */
604 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
606 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
609 static void tg3_ape_lock_init(struct tg3
*tp
)
614 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
615 regbase
= TG3_APE_LOCK_GRANT
;
617 regbase
= TG3_APE_PER_LOCK_GRANT
;
619 /* Make sure the driver hasn't any stale locks. */
620 for (i
= 0; i
< 8; i
++) {
621 if (i
== TG3_APE_LOCK_GPIO
)
623 tg3_ape_write32(tp
, regbase
+ 4 * i
, APE_LOCK_GRANT_DRIVER
);
626 /* Clear the correct bit of the GPIO lock too. */
628 bit
= APE_LOCK_GRANT_DRIVER
;
630 bit
= 1 << tp
->pci_fn
;
632 tg3_ape_write32(tp
, regbase
+ 4 * TG3_APE_LOCK_GPIO
, bit
);
635 static int tg3_ape_lock(struct tg3
*tp
, int locknum
)
639 u32 status
, req
, gnt
, bit
;
641 if (!tg3_flag(tp
, ENABLE_APE
))
645 case TG3_APE_LOCK_GPIO
:
646 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
648 case TG3_APE_LOCK_GRC
:
649 case TG3_APE_LOCK_MEM
:
655 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
656 req
= TG3_APE_LOCK_REQ
;
657 gnt
= TG3_APE_LOCK_GRANT
;
659 req
= TG3_APE_PER_LOCK_REQ
;
660 gnt
= TG3_APE_PER_LOCK_GRANT
;
665 if (locknum
!= TG3_APE_LOCK_GPIO
|| !tp
->pci_fn
)
666 bit
= APE_LOCK_REQ_DRIVER
;
668 bit
= 1 << tp
->pci_fn
;
670 tg3_ape_write32(tp
, req
+ off
, bit
);
672 /* Wait for up to 1 millisecond to acquire lock. */
673 for (i
= 0; i
< 100; i
++) {
674 status
= tg3_ape_read32(tp
, gnt
+ off
);
681 /* Revoke the lock request. */
682 tg3_ape_write32(tp
, gnt
+ off
, bit
);
689 static void tg3_ape_unlock(struct tg3
*tp
, int locknum
)
693 if (!tg3_flag(tp
, ENABLE_APE
))
697 case TG3_APE_LOCK_GPIO
:
698 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
700 case TG3_APE_LOCK_GRC
:
701 case TG3_APE_LOCK_MEM
:
707 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
708 gnt
= TG3_APE_LOCK_GRANT
;
710 gnt
= TG3_APE_PER_LOCK_GRANT
;
712 if (locknum
!= TG3_APE_LOCK_GPIO
|| !tp
->pci_fn
)
713 bit
= APE_LOCK_GRANT_DRIVER
;
715 bit
= 1 << tp
->pci_fn
;
717 tg3_ape_write32(tp
, gnt
+ 4 * locknum
, bit
);
720 static void tg3_disable_ints(struct tg3
*tp
)
724 tw32(TG3PCI_MISC_HOST_CTRL
,
725 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
726 for (i
= 0; i
< tp
->irq_max
; i
++)
727 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 0x00000001);
730 static void tg3_enable_ints(struct tg3
*tp
)
737 tw32(TG3PCI_MISC_HOST_CTRL
,
738 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
740 tp
->coal_now
= tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
;
741 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
742 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
744 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
745 if (tg3_flag(tp
, 1SHOT_MSI
))
746 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
748 tp
->coal_now
|= tnapi
->coal_now
;
751 /* Force an initial interrupt */
752 if (!tg3_flag(tp
, TAGGED_STATUS
) &&
753 (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
))
754 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
756 tw32(HOSTCC_MODE
, tp
->coal_now
);
758 tp
->coal_now
&= ~(tp
->napi
[0].coal_now
| tp
->napi
[1].coal_now
);
761 static inline unsigned int tg3_has_work(struct tg3_napi
*tnapi
)
763 struct tg3
*tp
= tnapi
->tp
;
764 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
765 unsigned int work_exists
= 0;
767 /* check for phy events */
768 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
769 if (sblk
->status
& SD_STATUS_LINK_CHG
)
772 /* check for RX/TX work to do */
773 if (sblk
->idx
[0].tx_consumer
!= tnapi
->tx_cons
||
774 *(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
781 * similar to tg3_enable_ints, but it accurately determines whether there
782 * is new work pending and can return without flushing the PIO write
783 * which reenables interrupts
785 static void tg3_int_reenable(struct tg3_napi
*tnapi
)
787 struct tg3
*tp
= tnapi
->tp
;
789 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
792 /* When doing tagged status, this work check is unnecessary.
793 * The last_tag we write above tells the chip which piece of
794 * work we've completed.
796 if (!tg3_flag(tp
, TAGGED_STATUS
) && tg3_has_work(tnapi
))
797 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
798 HOSTCC_MODE_ENABLE
| tnapi
->coal_now
);
801 static void tg3_switch_clocks(struct tg3
*tp
)
806 if (tg3_flag(tp
, CPMU_PRESENT
) || tg3_flag(tp
, 5780_CLASS
))
809 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
811 orig_clock_ctrl
= clock_ctrl
;
812 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
813 CLOCK_CTRL_CLKRUN_OENABLE
|
815 tp
->pci_clock_ctrl
= clock_ctrl
;
817 if (tg3_flag(tp
, 5705_PLUS
)) {
818 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
819 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
820 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
822 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
823 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
825 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
827 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
828 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
831 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
834 #define PHY_BUSY_LOOPS 5000
836 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
842 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
844 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
850 frame_val
= ((tp
->phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
851 MI_COM_PHY_ADDR_MASK
);
852 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
853 MI_COM_REG_ADDR_MASK
);
854 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
856 tw32_f(MAC_MI_COM
, frame_val
);
858 loops
= PHY_BUSY_LOOPS
;
861 frame_val
= tr32(MAC_MI_COM
);
863 if ((frame_val
& MI_COM_BUSY
) == 0) {
865 frame_val
= tr32(MAC_MI_COM
);
873 *val
= frame_val
& MI_COM_DATA_MASK
;
877 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
878 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
885 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
891 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
892 (reg
== MII_CTRL1000
|| reg
== MII_TG3_AUX_CTRL
))
895 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
897 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
901 frame_val
= ((tp
->phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
902 MI_COM_PHY_ADDR_MASK
);
903 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
904 MI_COM_REG_ADDR_MASK
);
905 frame_val
|= (val
& MI_COM_DATA_MASK
);
906 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
908 tw32_f(MAC_MI_COM
, frame_val
);
910 loops
= PHY_BUSY_LOOPS
;
913 frame_val
= tr32(MAC_MI_COM
);
914 if ((frame_val
& MI_COM_BUSY
) == 0) {
916 frame_val
= tr32(MAC_MI_COM
);
926 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
927 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
934 static int tg3_phy_cl45_write(struct tg3
*tp
, u32 devad
, u32 addr
, u32 val
)
938 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
942 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
946 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
947 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
951 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, val
);
957 static int tg3_phy_cl45_read(struct tg3
*tp
, u32 devad
, u32 addr
, u32
*val
)
961 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
965 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
969 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
970 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
974 err
= tg3_readphy(tp
, MII_TG3_MMD_ADDRESS
, val
);
980 static int tg3_phydsp_read(struct tg3
*tp
, u32 reg
, u32
*val
)
984 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
986 err
= tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, val
);
991 static int tg3_phydsp_write(struct tg3
*tp
, u32 reg
, u32 val
)
995 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
997 err
= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1002 static int tg3_phy_auxctl_read(struct tg3
*tp
, int reg
, u32
*val
)
1006 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
1007 (reg
<< MII_TG3_AUXCTL_MISC_RDSEL_SHIFT
) |
1008 MII_TG3_AUXCTL_SHDWSEL_MISC
);
1010 err
= tg3_readphy(tp
, MII_TG3_AUX_CTRL
, val
);
1015 static int tg3_phy_auxctl_write(struct tg3
*tp
, int reg
, u32 set
)
1017 if (reg
== MII_TG3_AUXCTL_SHDWSEL_MISC
)
1018 set
|= MII_TG3_AUXCTL_MISC_WREN
;
1020 return tg3_writephy(tp
, MII_TG3_AUX_CTRL
, set
| reg
);
1023 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1024 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1025 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1026 MII_TG3_AUXCTL_ACTL_TX_6DB)
1028 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1029 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1030 MII_TG3_AUXCTL_ACTL_TX_6DB);
1032 static int tg3_bmcr_reset(struct tg3
*tp
)
1037 /* OK, reset it, and poll the BMCR_RESET bit until it
1038 * clears or we time out.
1040 phy_control
= BMCR_RESET
;
1041 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
1047 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
1051 if ((phy_control
& BMCR_RESET
) == 0) {
1063 static int tg3_mdio_read(struct mii_bus
*bp
, int mii_id
, int reg
)
1065 struct tg3
*tp
= bp
->priv
;
1068 spin_lock_bh(&tp
->lock
);
1070 if (tg3_readphy(tp
, reg
, &val
))
1073 spin_unlock_bh(&tp
->lock
);
1078 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1080 struct tg3
*tp
= bp
->priv
;
1083 spin_lock_bh(&tp
->lock
);
1085 if (tg3_writephy(tp
, reg
, val
))
1088 spin_unlock_bh(&tp
->lock
);
1093 static int tg3_mdio_reset(struct mii_bus
*bp
)
1098 static void tg3_mdio_config_5785(struct tg3
*tp
)
1101 struct phy_device
*phydev
;
1103 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1104 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1105 case PHY_ID_BCM50610
:
1106 case PHY_ID_BCM50610M
:
1107 val
= MAC_PHYCFG2_50610_LED_MODES
;
1109 case PHY_ID_BCMAC131
:
1110 val
= MAC_PHYCFG2_AC131_LED_MODES
;
1112 case PHY_ID_RTL8211C
:
1113 val
= MAC_PHYCFG2_RTL8211C_LED_MODES
;
1115 case PHY_ID_RTL8201E
:
1116 val
= MAC_PHYCFG2_RTL8201E_LED_MODES
;
1122 if (phydev
->interface
!= PHY_INTERFACE_MODE_RGMII
) {
1123 tw32(MAC_PHYCFG2
, val
);
1125 val
= tr32(MAC_PHYCFG1
);
1126 val
&= ~(MAC_PHYCFG1_RGMII_INT
|
1127 MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
);
1128 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
;
1129 tw32(MAC_PHYCFG1
, val
);
1134 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1135 val
|= MAC_PHYCFG2_EMODE_MASK_MASK
|
1136 MAC_PHYCFG2_FMODE_MASK_MASK
|
1137 MAC_PHYCFG2_GMODE_MASK_MASK
|
1138 MAC_PHYCFG2_ACT_MASK_MASK
|
1139 MAC_PHYCFG2_QUAL_MASK_MASK
|
1140 MAC_PHYCFG2_INBAND_ENABLE
;
1142 tw32(MAC_PHYCFG2
, val
);
1144 val
= tr32(MAC_PHYCFG1
);
1145 val
&= ~(MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
|
1146 MAC_PHYCFG1_RGMII_EXT_RX_DEC
| MAC_PHYCFG1_RGMII_SND_STAT_EN
);
1147 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1148 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1149 val
|= MAC_PHYCFG1_RGMII_EXT_RX_DEC
;
1150 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1151 val
|= MAC_PHYCFG1_RGMII_SND_STAT_EN
;
1153 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
|
1154 MAC_PHYCFG1_RGMII_INT
| MAC_PHYCFG1_TXC_DRV
;
1155 tw32(MAC_PHYCFG1
, val
);
1157 val
= tr32(MAC_EXT_RGMII_MODE
);
1158 val
&= ~(MAC_RGMII_MODE_RX_INT_B
|
1159 MAC_RGMII_MODE_RX_QUALITY
|
1160 MAC_RGMII_MODE_RX_ACTIVITY
|
1161 MAC_RGMII_MODE_RX_ENG_DET
|
1162 MAC_RGMII_MODE_TX_ENABLE
|
1163 MAC_RGMII_MODE_TX_LOWPWR
|
1164 MAC_RGMII_MODE_TX_RESET
);
1165 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1166 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1167 val
|= MAC_RGMII_MODE_RX_INT_B
|
1168 MAC_RGMII_MODE_RX_QUALITY
|
1169 MAC_RGMII_MODE_RX_ACTIVITY
|
1170 MAC_RGMII_MODE_RX_ENG_DET
;
1171 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1172 val
|= MAC_RGMII_MODE_TX_ENABLE
|
1173 MAC_RGMII_MODE_TX_LOWPWR
|
1174 MAC_RGMII_MODE_TX_RESET
;
1176 tw32(MAC_EXT_RGMII_MODE
, val
);
1179 static void tg3_mdio_start(struct tg3
*tp
)
1181 tp
->mi_mode
&= ~MAC_MI_MODE_AUTO_POLL
;
1182 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1185 if (tg3_flag(tp
, MDIOBUS_INITED
) &&
1186 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
1187 tg3_mdio_config_5785(tp
);
1190 static int tg3_mdio_init(struct tg3
*tp
)
1194 struct phy_device
*phydev
;
1196 if (tg3_flag(tp
, 5717_PLUS
)) {
1199 tp
->phy_addr
= tp
->pci_fn
+ 1;
1201 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5717_A0
)
1202 is_serdes
= tr32(SG_DIG_STATUS
) & SG_DIG_IS_SERDES
;
1204 is_serdes
= tr32(TG3_CPMU_PHY_STRAP
) &
1205 TG3_CPMU_PHY_STRAP_IS_SERDES
;
1209 tp
->phy_addr
= TG3_PHY_MII_ADDR
;
1213 if (!tg3_flag(tp
, USE_PHYLIB
) || tg3_flag(tp
, MDIOBUS_INITED
))
1216 tp
->mdio_bus
= mdiobus_alloc();
1217 if (tp
->mdio_bus
== NULL
)
1220 tp
->mdio_bus
->name
= "tg3 mdio bus";
1221 snprintf(tp
->mdio_bus
->id
, MII_BUS_ID_SIZE
, "%x",
1222 (tp
->pdev
->bus
->number
<< 8) | tp
->pdev
->devfn
);
1223 tp
->mdio_bus
->priv
= tp
;
1224 tp
->mdio_bus
->parent
= &tp
->pdev
->dev
;
1225 tp
->mdio_bus
->read
= &tg3_mdio_read
;
1226 tp
->mdio_bus
->write
= &tg3_mdio_write
;
1227 tp
->mdio_bus
->reset
= &tg3_mdio_reset
;
1228 tp
->mdio_bus
->phy_mask
= ~(1 << TG3_PHY_MII_ADDR
);
1229 tp
->mdio_bus
->irq
= &tp
->mdio_irq
[0];
1231 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1232 tp
->mdio_bus
->irq
[i
] = PHY_POLL
;
1234 /* The bus registration will look for all the PHYs on the mdio bus.
1235 * Unfortunately, it does not ensure the PHY is powered up before
1236 * accessing the PHY ID registers. A chip reset is the
1237 * quickest way to bring the device back to an operational state..
1239 if (tg3_readphy(tp
, MII_BMCR
, ®
) || (reg
& BMCR_PDOWN
))
1242 i
= mdiobus_register(tp
->mdio_bus
);
1244 dev_warn(&tp
->pdev
->dev
, "mdiobus_reg failed (0x%x)\n", i
);
1245 mdiobus_free(tp
->mdio_bus
);
1249 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1251 if (!phydev
|| !phydev
->drv
) {
1252 dev_warn(&tp
->pdev
->dev
, "No PHY devices\n");
1253 mdiobus_unregister(tp
->mdio_bus
);
1254 mdiobus_free(tp
->mdio_bus
);
1258 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1259 case PHY_ID_BCM57780
:
1260 phydev
->interface
= PHY_INTERFACE_MODE_GMII
;
1261 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1263 case PHY_ID_BCM50610
:
1264 case PHY_ID_BCM50610M
:
1265 phydev
->dev_flags
|= PHY_BRCM_CLEAR_RGMII_MODE
|
1266 PHY_BRCM_RX_REFCLK_UNUSED
|
1267 PHY_BRCM_DIS_TXCRXC_NOENRGY
|
1268 PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1269 if (tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1270 phydev
->dev_flags
|= PHY_BRCM_STD_IBND_DISABLE
;
1271 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1272 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_RX_ENABLE
;
1273 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1274 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_TX_ENABLE
;
1276 case PHY_ID_RTL8211C
:
1277 phydev
->interface
= PHY_INTERFACE_MODE_RGMII
;
1279 case PHY_ID_RTL8201E
:
1280 case PHY_ID_BCMAC131
:
1281 phydev
->interface
= PHY_INTERFACE_MODE_MII
;
1282 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1283 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
1287 tg3_flag_set(tp
, MDIOBUS_INITED
);
1289 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
1290 tg3_mdio_config_5785(tp
);
1295 static void tg3_mdio_fini(struct tg3
*tp
)
1297 if (tg3_flag(tp
, MDIOBUS_INITED
)) {
1298 tg3_flag_clear(tp
, MDIOBUS_INITED
);
1299 mdiobus_unregister(tp
->mdio_bus
);
1300 mdiobus_free(tp
->mdio_bus
);
1304 /* tp->lock is held. */
1305 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1309 val
= tr32(GRC_RX_CPU_EVENT
);
1310 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1311 tw32_f(GRC_RX_CPU_EVENT
, val
);
1313 tp
->last_event_jiffies
= jiffies
;
1316 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1318 /* tp->lock is held. */
1319 static void tg3_wait_for_event_ack(struct tg3
*tp
)
1322 unsigned int delay_cnt
;
1325 /* If enough time has passed, no wait is necessary. */
1326 time_remain
= (long)(tp
->last_event_jiffies
+ 1 +
1327 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC
)) -
1329 if (time_remain
< 0)
1332 /* Check if we can shorten the wait time. */
1333 delay_cnt
= jiffies_to_usecs(time_remain
);
1334 if (delay_cnt
> TG3_FW_EVENT_TIMEOUT_USEC
)
1335 delay_cnt
= TG3_FW_EVENT_TIMEOUT_USEC
;
1336 delay_cnt
= (delay_cnt
>> 3) + 1;
1338 for (i
= 0; i
< delay_cnt
; i
++) {
1339 if (!(tr32(GRC_RX_CPU_EVENT
) & GRC_RX_CPU_DRIVER_EVENT
))
1345 /* tp->lock is held. */
1346 static void tg3_ump_link_report(struct tg3
*tp
)
1351 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1354 tg3_wait_for_event_ack(tp
);
1356 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1358 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1361 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1363 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1364 val
|= (reg
& 0xffff);
1365 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
, val
);
1368 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1370 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1371 val
|= (reg
& 0xffff);
1372 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 4, val
);
1375 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1376 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1378 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1379 val
|= (reg
& 0xffff);
1381 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 8, val
);
1383 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1387 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 12, val
);
1389 tg3_generate_fw_event(tp
);
1392 static void tg3_link_report(struct tg3
*tp
)
1394 if (!netif_carrier_ok(tp
->dev
)) {
1395 netif_info(tp
, link
, tp
->dev
, "Link is down\n");
1396 tg3_ump_link_report(tp
);
1397 } else if (netif_msg_link(tp
)) {
1398 netdev_info(tp
->dev
, "Link is up at %d Mbps, %s duplex\n",
1399 (tp
->link_config
.active_speed
== SPEED_1000
?
1401 (tp
->link_config
.active_speed
== SPEED_100
?
1403 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1406 netdev_info(tp
->dev
, "Flow control is %s for TX and %s for RX\n",
1407 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
) ?
1409 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
) ?
1412 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)
1413 netdev_info(tp
->dev
, "EEE is %s\n",
1414 tp
->setlpicnt
? "enabled" : "disabled");
1416 tg3_ump_link_report(tp
);
1420 static u16
tg3_advert_flowctrl_1000T(u8 flow_ctrl
)
1424 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1425 miireg
= ADVERTISE_PAUSE_CAP
;
1426 else if (flow_ctrl
& FLOW_CTRL_TX
)
1427 miireg
= ADVERTISE_PAUSE_ASYM
;
1428 else if (flow_ctrl
& FLOW_CTRL_RX
)
1429 miireg
= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
1436 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
1440 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1441 miireg
= ADVERTISE_1000XPAUSE
;
1442 else if (flow_ctrl
& FLOW_CTRL_TX
)
1443 miireg
= ADVERTISE_1000XPSE_ASYM
;
1444 else if (flow_ctrl
& FLOW_CTRL_RX
)
1445 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1452 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
1456 if (lcladv
& ADVERTISE_1000XPAUSE
) {
1457 if (lcladv
& ADVERTISE_1000XPSE_ASYM
) {
1458 if (rmtadv
& LPA_1000XPAUSE
)
1459 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1460 else if (rmtadv
& LPA_1000XPAUSE_ASYM
)
1463 if (rmtadv
& LPA_1000XPAUSE
)
1464 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1466 } else if (lcladv
& ADVERTISE_1000XPSE_ASYM
) {
1467 if ((rmtadv
& LPA_1000XPAUSE
) && (rmtadv
& LPA_1000XPAUSE_ASYM
))
1474 static void tg3_setup_flow_control(struct tg3
*tp
, u32 lcladv
, u32 rmtadv
)
1478 u32 old_rx_mode
= tp
->rx_mode
;
1479 u32 old_tx_mode
= tp
->tx_mode
;
1481 if (tg3_flag(tp
, USE_PHYLIB
))
1482 autoneg
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]->autoneg
;
1484 autoneg
= tp
->link_config
.autoneg
;
1486 if (autoneg
== AUTONEG_ENABLE
&& tg3_flag(tp
, PAUSE_AUTONEG
)) {
1487 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
1488 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1490 flowctrl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1492 flowctrl
= tp
->link_config
.flowctrl
;
1494 tp
->link_config
.active_flowctrl
= flowctrl
;
1496 if (flowctrl
& FLOW_CTRL_RX
)
1497 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1499 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1501 if (old_rx_mode
!= tp
->rx_mode
)
1502 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1504 if (flowctrl
& FLOW_CTRL_TX
)
1505 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1507 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1509 if (old_tx_mode
!= tp
->tx_mode
)
1510 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1513 static void tg3_adjust_link(struct net_device
*dev
)
1515 u8 oldflowctrl
, linkmesg
= 0;
1516 u32 mac_mode
, lcl_adv
, rmt_adv
;
1517 struct tg3
*tp
= netdev_priv(dev
);
1518 struct phy_device
*phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1520 spin_lock_bh(&tp
->lock
);
1522 mac_mode
= tp
->mac_mode
& ~(MAC_MODE_PORT_MODE_MASK
|
1523 MAC_MODE_HALF_DUPLEX
);
1525 oldflowctrl
= tp
->link_config
.active_flowctrl
;
1531 if (phydev
->speed
== SPEED_100
|| phydev
->speed
== SPEED_10
)
1532 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1533 else if (phydev
->speed
== SPEED_1000
||
1534 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
)
1535 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1537 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1539 if (phydev
->duplex
== DUPLEX_HALF
)
1540 mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1542 lcl_adv
= tg3_advert_flowctrl_1000T(
1543 tp
->link_config
.flowctrl
);
1546 rmt_adv
= LPA_PAUSE_CAP
;
1547 if (phydev
->asym_pause
)
1548 rmt_adv
|= LPA_PAUSE_ASYM
;
1551 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
1553 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1555 if (mac_mode
!= tp
->mac_mode
) {
1556 tp
->mac_mode
= mac_mode
;
1557 tw32_f(MAC_MODE
, tp
->mac_mode
);
1561 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
) {
1562 if (phydev
->speed
== SPEED_10
)
1564 MAC_MI_STAT_10MBPS_MODE
|
1565 MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1567 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1570 if (phydev
->speed
== SPEED_1000
&& phydev
->duplex
== DUPLEX_HALF
)
1571 tw32(MAC_TX_LENGTHS
,
1572 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
1573 (6 << TX_LENGTHS_IPG_SHIFT
) |
1574 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
1576 tw32(MAC_TX_LENGTHS
,
1577 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
1578 (6 << TX_LENGTHS_IPG_SHIFT
) |
1579 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
1581 if ((phydev
->link
&& tp
->link_config
.active_speed
== SPEED_INVALID
) ||
1582 (!phydev
->link
&& tp
->link_config
.active_speed
!= SPEED_INVALID
) ||
1583 phydev
->speed
!= tp
->link_config
.active_speed
||
1584 phydev
->duplex
!= tp
->link_config
.active_duplex
||
1585 oldflowctrl
!= tp
->link_config
.active_flowctrl
)
1588 tp
->link_config
.active_speed
= phydev
->speed
;
1589 tp
->link_config
.active_duplex
= phydev
->duplex
;
1591 spin_unlock_bh(&tp
->lock
);
1594 tg3_link_report(tp
);
1597 static int tg3_phy_init(struct tg3
*tp
)
1599 struct phy_device
*phydev
;
1601 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
)
1604 /* Bring the PHY back to a known state. */
1607 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1609 /* Attach the MAC to the PHY. */
1610 phydev
= phy_connect(tp
->dev
, dev_name(&phydev
->dev
), tg3_adjust_link
,
1611 phydev
->dev_flags
, phydev
->interface
);
1612 if (IS_ERR(phydev
)) {
1613 dev_err(&tp
->pdev
->dev
, "Could not attach to PHY\n");
1614 return PTR_ERR(phydev
);
1617 /* Mask with MAC supported features. */
1618 switch (phydev
->interface
) {
1619 case PHY_INTERFACE_MODE_GMII
:
1620 case PHY_INTERFACE_MODE_RGMII
:
1621 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
1622 phydev
->supported
&= (PHY_GBIT_FEATURES
|
1624 SUPPORTED_Asym_Pause
);
1628 case PHY_INTERFACE_MODE_MII
:
1629 phydev
->supported
&= (PHY_BASIC_FEATURES
|
1631 SUPPORTED_Asym_Pause
);
1634 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
1638 tp
->phy_flags
|= TG3_PHYFLG_IS_CONNECTED
;
1640 phydev
->advertising
= phydev
->supported
;
1645 static void tg3_phy_start(struct tg3
*tp
)
1647 struct phy_device
*phydev
;
1649 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
1652 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1654 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
1655 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
1656 phydev
->speed
= tp
->link_config
.orig_speed
;
1657 phydev
->duplex
= tp
->link_config
.orig_duplex
;
1658 phydev
->autoneg
= tp
->link_config
.orig_autoneg
;
1659 phydev
->advertising
= tp
->link_config
.orig_advertising
;
1664 phy_start_aneg(phydev
);
1667 static void tg3_phy_stop(struct tg3
*tp
)
1669 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
1672 phy_stop(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
1675 static void tg3_phy_fini(struct tg3
*tp
)
1677 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
1678 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
1679 tp
->phy_flags
&= ~TG3_PHYFLG_IS_CONNECTED
;
1683 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
1687 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
1690 tg3_writephy(tp
, MII_TG3_FET_TEST
,
1691 phytest
| MII_TG3_FET_SHADOW_EN
);
1692 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
1694 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
1696 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
1697 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
1699 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
1703 static void tg3_phy_toggle_apd(struct tg3
*tp
, bool enable
)
1707 if (!tg3_flag(tp
, 5705_PLUS
) ||
1708 (tg3_flag(tp
, 5717_PLUS
) &&
1709 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)))
1712 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
1713 tg3_phy_fet_toggle_apd(tp
, enable
);
1717 reg
= MII_TG3_MISC_SHDW_WREN
|
1718 MII_TG3_MISC_SHDW_SCR5_SEL
|
1719 MII_TG3_MISC_SHDW_SCR5_LPED
|
1720 MII_TG3_MISC_SHDW_SCR5_DLPTLM
|
1721 MII_TG3_MISC_SHDW_SCR5_SDTL
|
1722 MII_TG3_MISC_SHDW_SCR5_C125OE
;
1723 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
|| !enable
)
1724 reg
|= MII_TG3_MISC_SHDW_SCR5_DLLAPD
;
1726 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
1729 reg
= MII_TG3_MISC_SHDW_WREN
|
1730 MII_TG3_MISC_SHDW_APD_SEL
|
1731 MII_TG3_MISC_SHDW_APD_WKTM_84MS
;
1733 reg
|= MII_TG3_MISC_SHDW_APD_ENABLE
;
1735 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
1738 static void tg3_phy_toggle_automdix(struct tg3
*tp
, int enable
)
1742 if (!tg3_flag(tp
, 5705_PLUS
) ||
1743 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
1746 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
1749 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &ephy
)) {
1750 u32 reg
= MII_TG3_FET_SHDW_MISCCTRL
;
1752 tg3_writephy(tp
, MII_TG3_FET_TEST
,
1753 ephy
| MII_TG3_FET_SHADOW_EN
);
1754 if (!tg3_readphy(tp
, reg
, &phy
)) {
1756 phy
|= MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
1758 phy
&= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
1759 tg3_writephy(tp
, reg
, phy
);
1761 tg3_writephy(tp
, MII_TG3_FET_TEST
, ephy
);
1766 ret
= tg3_phy_auxctl_read(tp
,
1767 MII_TG3_AUXCTL_SHDWSEL_MISC
, &phy
);
1770 phy
|= MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
1772 phy
&= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
1773 tg3_phy_auxctl_write(tp
,
1774 MII_TG3_AUXCTL_SHDWSEL_MISC
, phy
);
1779 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
1784 if (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
)
1787 ret
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
, &val
);
1789 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
,
1790 val
| MII_TG3_AUXCTL_MISC_WIRESPD_EN
);
1793 static void tg3_phy_apply_otp(struct tg3
*tp
)
1802 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
))
1805 phy
= ((otp
& TG3_OTP_AGCTGT_MASK
) >> TG3_OTP_AGCTGT_SHIFT
);
1806 phy
|= MII_TG3_DSP_TAP1_AGCTGT_DFLT
;
1807 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP1
, phy
);
1809 phy
= ((otp
& TG3_OTP_HPFFLTR_MASK
) >> TG3_OTP_HPFFLTR_SHIFT
) |
1810 ((otp
& TG3_OTP_HPFOVER_MASK
) >> TG3_OTP_HPFOVER_SHIFT
);
1811 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH0
, phy
);
1813 phy
= ((otp
& TG3_OTP_LPFDIS_MASK
) >> TG3_OTP_LPFDIS_SHIFT
);
1814 phy
|= MII_TG3_DSP_AADJ1CH3_ADCCKADJ
;
1815 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH3
, phy
);
1817 phy
= ((otp
& TG3_OTP_VDAC_MASK
) >> TG3_OTP_VDAC_SHIFT
);
1818 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP75
, phy
);
1820 phy
= ((otp
& TG3_OTP_10BTAMP_MASK
) >> TG3_OTP_10BTAMP_SHIFT
);
1821 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP96
, phy
);
1823 phy
= ((otp
& TG3_OTP_ROFF_MASK
) >> TG3_OTP_ROFF_SHIFT
) |
1824 ((otp
& TG3_OTP_RCOFF_MASK
) >> TG3_OTP_RCOFF_SHIFT
);
1825 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP97
, phy
);
1827 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
1830 static void tg3_phy_eee_adjust(struct tg3
*tp
, u32 current_link_up
)
1834 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
1839 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
1840 current_link_up
== 1 &&
1841 tp
->link_config
.active_duplex
== DUPLEX_FULL
&&
1842 (tp
->link_config
.active_speed
== SPEED_100
||
1843 tp
->link_config
.active_speed
== SPEED_1000
)) {
1846 if (tp
->link_config
.active_speed
== SPEED_1000
)
1847 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_16_5_US
;
1849 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_36_US
;
1851 tw32(TG3_CPMU_EEE_CTRL
, eeectl
);
1853 tg3_phy_cl45_read(tp
, MDIO_MMD_AN
,
1854 TG3_CL45_D7_EEERES_STAT
, &val
);
1856 if (val
== TG3_CL45_D7_EEERES_STAT_LP_1000T
||
1857 val
== TG3_CL45_D7_EEERES_STAT_LP_100TX
)
1861 if (!tp
->setlpicnt
) {
1862 if (current_link_up
== 1 &&
1863 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
)) {
1864 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, 0x0000);
1865 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
1868 val
= tr32(TG3_CPMU_EEE_MODE
);
1869 tw32(TG3_CPMU_EEE_MODE
, val
& ~TG3_CPMU_EEEMD_LPI_ENABLE
);
1873 static void tg3_phy_eee_enable(struct tg3
*tp
)
1877 if (tp
->link_config
.active_speed
== SPEED_1000
&&
1878 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
1879 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
1880 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
) &&
1881 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
)) {
1882 val
= MII_TG3_DSP_TAP26_ALNOKO
|
1883 MII_TG3_DSP_TAP26_RMRXSTO
;
1884 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
1885 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
1888 val
= tr32(TG3_CPMU_EEE_MODE
);
1889 tw32(TG3_CPMU_EEE_MODE
, val
| TG3_CPMU_EEEMD_LPI_ENABLE
);
1892 static int tg3_wait_macro_done(struct tg3
*tp
)
1899 if (!tg3_readphy(tp
, MII_TG3_DSP_CONTROL
, &tmp32
)) {
1900 if ((tmp32
& 0x1000) == 0)
1910 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
1912 static const u32 test_pat
[4][6] = {
1913 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1914 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1915 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1916 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1920 for (chan
= 0; chan
< 4; chan
++) {
1923 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1924 (chan
* 0x2000) | 0x0200);
1925 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
1927 for (i
= 0; i
< 6; i
++)
1928 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
1931 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
1932 if (tg3_wait_macro_done(tp
)) {
1937 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1938 (chan
* 0x2000) | 0x0200);
1939 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0082);
1940 if (tg3_wait_macro_done(tp
)) {
1945 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0802);
1946 if (tg3_wait_macro_done(tp
)) {
1951 for (i
= 0; i
< 6; i
+= 2) {
1954 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
1955 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
1956 tg3_wait_macro_done(tp
)) {
1962 if (low
!= test_pat
[chan
][i
] ||
1963 high
!= test_pat
[chan
][i
+1]) {
1964 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
1965 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
1966 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
1976 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
1980 for (chan
= 0; chan
< 4; chan
++) {
1983 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1984 (chan
* 0x2000) | 0x0200);
1985 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
1986 for (i
= 0; i
< 6; i
++)
1987 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
1988 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
1989 if (tg3_wait_macro_done(tp
))
1996 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
1998 u32 reg32
, phy9_orig
;
1999 int retries
, do_phy_reset
, err
;
2005 err
= tg3_bmcr_reset(tp
);
2011 /* Disable transmitter and interrupt. */
2012 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
2016 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
2018 /* Set full-duplex, 1000 mbps. */
2019 tg3_writephy(tp
, MII_BMCR
,
2020 BMCR_FULLDPLX
| BMCR_SPEED1000
);
2022 /* Set to master mode. */
2023 if (tg3_readphy(tp
, MII_CTRL1000
, &phy9_orig
))
2026 tg3_writephy(tp
, MII_CTRL1000
,
2027 CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
);
2029 err
= TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
);
2033 /* Block the PHY control access. */
2034 tg3_phydsp_write(tp
, 0x8005, 0x0800);
2036 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
2039 } while (--retries
);
2041 err
= tg3_phy_reset_chanpat(tp
);
2045 tg3_phydsp_write(tp
, 0x8005, 0x0000);
2047 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
2048 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0000);
2050 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
2052 tg3_writephy(tp
, MII_CTRL1000
, phy9_orig
);
2054 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
)) {
2056 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		0x00000002
#define TG3_GPIO_MSG_MASK		(TG3_GPIO_MSG_DRVR_PRES | \
					 TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
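
/* Publish this PCI function's power status (driver present / needs
 * VAUX) in the status word shared by all functions: the APE GPIO_MSG
 * register on 5717/5719, the CPMU driver status register otherwise.
 * Returns the combined status bits for all functions.
 */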
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
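
/* Return the board's power source to VMAIN.  On 5717/5719/5720 the
 * GPIO handshake is serialized through the APE GPIO lock.
 */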
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
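
/* Walk the GPIOs through the board-specific sequence that switches
 * the power source over to the auxiliary (VAUX) supply.
 */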
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
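
/* For 5717-class chips, pick VAUX or VMAIN from the aggregate
 * driver-present / need-VAUX status of all PCI functions.
 */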
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
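
/* Put the PHY into a low-power state.  Serdes, 5906 and FET-style
 * PHYs each take a different path, and a few chips must keep the
 * PHY powered because of hardware bugs.
 */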
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
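
/* Fall-back path for parts without NVRAM: read one aligned 32-bit
 * word through the GRC EEPROM address/data registers, polling for
 * completion.
 */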
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000
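
/* Kick off an NVRAM command and poll up to NVRAM_CMD_TIMEOUT
 * iterations for NVRAM_CMD_DONE.
 */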
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
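
/* Convert a linear NVRAM offset to the page-based physical address
 * used by buffered Atmel AT45DB0X1B flash parts.
 */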
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
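
/* Program the MAC address registers (plus the extended set on
 * 5703/5704) and seed the TX backoff algorithm from the address
 * bytes.
 */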
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
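
/* Decode the PHY aux status register into speed/duplex values. */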
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
					 u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
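
/* Program the autoneg advertisement registers (10/100, 1000BASE-T
 * and, where supported, EEE) from the requested advertisement and
 * flow control masks.
 */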
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= ADVERTISE_1000HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000FULL;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tg3_flag(tp, PAUSE_AUTONEG))
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
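
/* Bring up (or renegotiate) the link on a copper PHY and point the
 * MAC mode register at the resulting speed, duplex and polarity.
 */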
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
							      &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) +
					      PCI_EXP_LNKCTL, newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
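
/* Run one step of the software 1000BASE-X autoneg state machine,
 * used when the MAC cannot autonegotiate in hardware.  Returns
 * ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */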
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
*tp
, int force_reset
)
4256 u16 orig_active_speed
;
4257 u8 orig_active_duplex
;
4259 int current_link_up
;
4262 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
4263 orig_active_speed
= tp
->link_config
.active_speed
;
4264 orig_active_duplex
= tp
->link_config
.active_duplex
;
4266 if (!tg3_flag(tp
, HW_AUTONEG
) &&
4267 netif_carrier_ok(tp
->dev
) &&
4268 tg3_flag(tp
, INIT_COMPLETE
)) {
4269 mac_status
= tr32(MAC_STATUS
);
4270 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
4271 MAC_STATUS_SIGNAL_DET
|
4272 MAC_STATUS_CFG_CHANGED
|
4273 MAC_STATUS_RCVD_CFG
);
4274 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
4275 MAC_STATUS_SIGNAL_DET
)) {
4276 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
4277 MAC_STATUS_CFG_CHANGED
));
4282 tw32_f(MAC_TX_AUTO_NEG
, 0);
4284 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
4285 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
4286 tw32_f(MAC_MODE
, tp
->mac_mode
);
4289 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
4290 tg3_init_bcm8002(tp
);
4292 /* Enable link change event even when serdes polling. */
4293 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4296 current_link_up
= 0;
4297 mac_status
= tr32(MAC_STATUS
);
4299 if (tg3_flag(tp
, HW_AUTONEG
))
4300 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
4302 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
4304 tp
->napi
[0].hw_status
->status
=
4305 (SD_STATUS_UPDATED
|
4306 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
4308 for (i
= 0; i
< 100; i
++) {
4309 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
4310 MAC_STATUS_CFG_CHANGED
));
4312 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
4313 MAC_STATUS_CFG_CHANGED
|
4314 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
4318 mac_status
= tr32(MAC_STATUS
);
4319 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
4320 current_link_up
= 0;
4321 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
4322 tp
->serdes_counter
== 0) {
4323 tw32_f(MAC_MODE
, (tp
->mac_mode
|
4324 MAC_MODE_SEND_CONFIGS
));
4326 tw32_f(MAC_MODE
, tp
->mac_mode
);
4330 if (current_link_up
== 1) {
4331 tp
->link_config
.active_speed
= SPEED_1000
;
4332 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
4333 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
4334 LED_CTRL_LNKLED_OVERRIDE
|
4335 LED_CTRL_1000MBPS_ON
));
4337 tp
->link_config
.active_speed
= SPEED_INVALID
;
4338 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
4339 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
4340 LED_CTRL_LNKLED_OVERRIDE
|
4341 LED_CTRL_TRAFFIC_OVERRIDE
));
4344 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
4345 if (current_link_up
)
4346 netif_carrier_on(tp
->dev
);
4348 netif_carrier_off(tp
->dev
);
4349 tg3_link_report(tp
);
4351 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
4352 if (orig_pause_cfg
!= now_pause_cfg
||
4353 orig_active_speed
!= tp
->link_config
.active_speed
||
4354 orig_active_duplex
!= tp
->link_config
.active_duplex
)
4355 tg3_link_report(tp
);
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}
	tg3_link_report(tp);
	return err;
}
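/* Compiled-out note on the back-to-back MII_BMSR reads above: the link
 * status bit in BMSR is latched-low, so the first read may still report a
 * stale link-down event; reading twice returns the current state.  A
 * hedged sketch of the idiom (purely illustrative, not used anywhere):
 */
#if 0
static u32 read_current_bmsr_example(struct tg3 *tp)
{
	u32 bmsr = 0;

	tg3_readphy(tp, MII_BMSR, &bmsr);	/* clears the latched value */
	tg3_readphy(tp, MII_BMSR, &bmsr);	/* reflects the live link state */
	return bmsr;
}
#endif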
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
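/* Compiled-out usage sketch for tg3_rd32_loop() (purely illustrative):
 * `off` doubles as both the register offset and the offset into `dst`, so
 * one buffer sized TG3_REG_BLK_SIZE can hold a sparse image of the whole
 * register block, e.g. the 0xb0 bytes starting at TG3PCI_VENDOR:
 */
#if 0
static void rd32_loop_usage_example(struct tg3 *tp, u32 *regs)
{
	/* regs must point at a TG3_REG_BLK_SIZE buffer */
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
}
#endif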
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox.  The symptom
 * is bogus tx completions.  We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
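/* Compiled-out worked example of the index arithmetic above (the ring size
 * and index values are hypothetical): the subtraction is safe across u32
 * wrap-around because of the power-of-two mask.
 */
#if 0
static u32 ring_avail_example(void)
{
	u32 pending = 511;		/* queue depth configured by the driver */
	u32 prod = 3, cons = 509;	/* producer has wrapped past the consumer */

	/* (3 - 509) underflows to a huge u32, but & 511 folds it back to 6,
	 * the true number of in-flight descriptors; 511 - 6 = 505 free slots.
	 */
	return pending - ((prod - cons) & (512 - 1));
}
#endif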
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
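/* Compiled-out sketch of the barrier pairing described in the comment above
 * (illustrative only): the consumer publishes tx_cons with smp_mb() before
 * testing the stopped flag, while the producer stops the queue, issues its
 * own smp_mb(), and then re-checks availability, so at least one side always
 * observes the other's update.
 */
#if 0
static void producer_side_example(struct tg3_napi *tnapi,
				  struct netdev_queue *txq)
{
	if (tg3_tx_avail(tnapi) <= MAX_SKB_FRAGS + 1) {
		netif_tx_stop_queue(txq);
		smp_mb();		/* pairs with smp_mb() in tg3_tx() */
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}
}
#endif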
static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->skb)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(ri->skb);
	ri->skb = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
						       PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
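/* Compiled-out sketch of the copy-break pattern used in tg3_rx() above
 * (the threshold value here is illustrative, not TG3_RX_COPY_THRESH):
 * small packets are copied into a fresh skb so the large DMA buffer can be
 * recycled in place, while large packets hand off the original buffer and
 * allocate a replacement.
 */
#if 0
static bool rx_should_copy_example(unsigned int len)
{
	return len <= 256;	/* hypothetical copy threshold in bytes */
}
#endif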
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
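/* Compiled-out illustration of the cpycnt computation above (the index
 * values are hypothetical): on a ring of size mask + 1 == 512 with
 * cons == 500 and prod == 20 the producer has wrapped, so only the 12
 * entries from 500..511 can be copied contiguously; the remaining 20 are
 * picked up on the next loop iteration.
 */
#if 0
static u32 contig_copy_example(void)
{
	u32 mask = 511, cons = 500, prod = 20;

	return (cons < prod) ? prod - cons : mask + 1 - cons;	/* == 12 */
}
#endif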
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	schedule_work(&tp->reset_task);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
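/* Compiled-out usage sketch: a typical reconfiguration path takes the full
 * lock with IRQ synchronization, reprograms the hardware, and releases it.
 * The "reprogram" step here is a hypothetical placeholder for the caller's
 * own work.
 */
#if 0
static void reconfig_example(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* also quiesce the IRQ handlers */
	/* ... reprogram the hardware here ... */
	tg3_full_unlock(tp);
}
#endif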
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tg3_flag(tp, RESTART_TIMER);
	tg3_flag_clear(tp, RESTART_TIMER);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	schedule_work(&tp->reset_task);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
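/* Compiled-out worked example of the 4GB-boundary test above (the mapping
 * address and length are hypothetical): a buffer mapped at 0xfffff000 with
 * len == 0x2000 wraps the low 32 bits, so base + len + 8 overflows past
 * 0x100000000 and compares below base, flagging a descriptor the chip
 * cannot DMA in one piece.
 */
#if 0
static int boundary_example(void)
{
	u32 base = 0xfffff000, len = 0x2000;

	return (base > 0xffffdcc0) && (base + len + 8 < base);	/* == 1 */
}
#endif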
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > TG3_TX_BD_DMA_MAX) {
			u32 frag_len = TG3_TX_BD_DMA_MAX;
			len -= TG3_TX_BD_DMA_MAX;

			tnapi->tx_buffers[*entry].fragmented = true;
			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += TG3_TX_BD_DMA_MAX / 2;
				frag_len = TG3_TX_BD_DMA_MAX / 2;
			}

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);

			*budget -= 1;
			*entry = NEXT_TX(*entry);
			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	if (!skb)
		return;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i < last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       frag->size, PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff *skb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, *entry, 0);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);

	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
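/* Compiled-out illustration of the worst-case reservation above (the MSS
 * and segment counts are hypothetical, and the factor of 3 is the driver's
 * conservative guess at descriptors per segment): a 64KB TSO skb with an
 * MSS around 1448 yields roughly 45 GSO segments, so about 135 descriptors
 * are required to be free before segmenting.
 */
#if 0
static u32 frag_estimate_example(u32 gso_segs)
{
	return gso_segs * 3;	/* e.g. 45 segments -> 135 descriptors */
}
#endif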
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		if (skb_is_gso_v6(skb)) {
			hdr_len = skb_headlen(skb) - ETH_HLEN;
		} else {
			u32 ip_tcp_len;

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			hdr_len = ip_tcp_len + tcp_opt_len;

			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

#ifdef BCM_KERNEL_SUPPORTS_8021Q
	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}
#endif

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan))
		would_hit_hwbug = 1;

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

			if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan))
				would_hit_hwbug = 1;
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
						base_flags, mss, vlan))
			goto out_unlock;
	}

	skb_tx_timestamp(skb);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
	dev_kfree_skb(skb);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
	return NETDEV_TX_OK;
}
static void tg3_set_loopback(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		/*
		 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
		 * loopback mode if Half-Duplex mode was negotiated earlier.
		 */
		tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;

		/* Enable internal MAC loopback mode */
		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		spin_lock_bh(&tp->lock);
		tw32(MAC_MODE, tp->mac_mode);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		/* Disable internal MAC loopback mode */
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
		spin_lock_bh(&tp->lock);
		tw32(MAC_MODE, tp->mac_mode);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static u32 tg3_fix_features(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);

			dev_kfree_skb_any(skb);
		}
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					sizeof(struct tg3_tx_ring_info) *
					TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							    &tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
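/* Summary of the rx_rcb_prod_idx mapping chosen above: vectors 0 and 1 use
 * the regular sblk->idx[0].rx_producer, while vectors 2, 3 and 4 borrow the
 * rx_jumbo_consumer, reserved and rx_mini_consumer status block fields
 * respectively as their rx return ring producer indexes.
 */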
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
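/* The shutdown order above matters: the receive path is stopped first so no
 * new work is queued, then the send path, then the host coalescing and DMA
 * engines, and finally the buffer manager and memory arbiter that everything
 * else depends on.
 */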
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
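/* The three signature writers above divide the firmware handshake up
 * roughly as follows: tg3_write_sig_pre_reset() warns bootcode that a reset
 * is coming, tg3_write_sig_post_reset() reports completion using the _DONE
 * states of the new ASF handshake, and tg3_write_sig_legacy() covers older
 * ASF firmware that only understands the plain state values.
 */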
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
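/* The handshake polled above: the driver writes NIC_SRAM_FIRMWARE_MBOX_MAGIC1
 * into the mailbox before reset (see tg3_write_sig_pre_reset), and bootcode
 * writes back its one's complement (~MAGIC1) once initialization finishes.
 */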
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
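/* Only the PCI command word is saved verbatim by tg3_save_pci_state();
 * everything else restored above is rebuilt from values already cached in
 * the tg3 struct (misc_host_ctrl, cacheline size, latency timer, capability
 * offsets), so a full config-space save is unnecessary.
 */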
static void tg3_stop_fw(struct tg3 *);

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}
	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
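/* The chip reset wipes the ASF-related flags, so they are re-derived above
 * from the NIC SRAM config word rather than trusted from before the reset.
 */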
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
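/* write_op selection above: on 5705 and newer parts the CPU scratch area is
 * reached through NIC SRAM (tg3_write_mem), while older chips need indirect
 * register writes (tg3_write_indirect_reg32) to get at the same space.
 */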
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length.  We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);

	return 0;
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length.  We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}
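/* On 5705 parts the TX CPU cannot be used (see tg3_load_firmware_cpu), so
 * the TSO firmware is loaded into the RX CPU instead, using the mbuf pool
 * area as scratch space; cpu_scratch_size then stays at tp->fw_len rather
 * than the fixed TX scratch size.
 */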
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
*);
7843 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
7847 if (!tg3_flag(tp
, ENABLE_TSS
)) {
7848 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
7849 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
7850 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
7852 tw32(HOSTCC_TXCOL_TICKS
, 0);
7853 tw32(HOSTCC_TXMAX_FRAMES
, 0);
7854 tw32(HOSTCC_TXCOAL_MAXF_INT
, 0);
7857 if (!tg3_flag(tp
, ENABLE_RSS
)) {
7858 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
7859 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
7860 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
7862 tw32(HOSTCC_RXCOL_TICKS
, 0);
7863 tw32(HOSTCC_RXMAX_FRAMES
, 0);
7864 tw32(HOSTCC_RXCOAL_MAXF_INT
, 0);
7867 if (!tg3_flag(tp
, 5705_PLUS
)) {
7868 u32 val
= ec
->stats_block_coalesce_usecs
;
7870 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
7871 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
7873 if (!netif_carrier_ok(tp
->dev
))
7876 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
7879 for (i
= 0; i
< tp
->irq_cnt
- 1; i
++) {
7882 reg
= HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18;
7883 tw32(reg
, ec
->rx_coalesce_usecs
);
7884 reg
= HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18;
7885 tw32(reg
, ec
->rx_max_coalesced_frames
);
7886 reg
= HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
7887 tw32(reg
, ec
->rx_max_coalesced_frames_irq
);
7889 if (tg3_flag(tp
, ENABLE_TSS
)) {
7890 reg
= HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18;
7891 tw32(reg
, ec
->tx_coalesce_usecs
);
7892 reg
= HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18;
7893 tw32(reg
, ec
->tx_max_coalesced_frames
);
7894 reg
= HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
7895 tw32(reg
, ec
->tx_max_coalesced_frames_irq
);
7899 for (; i
< tp
->irq_max
- 1; i
++) {
7900 tw32(HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18, 0);
7901 tw32(HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
7902 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
7904 if (tg3_flag(tp
, ENABLE_TSS
)) {
7905 tw32(HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18, 0);
7906 tw32(HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
7907 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
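/* The 0x18 stride used above reflects the per-vector host coalescing
 * register layout: each additional MSI-X vector gets its own block of
 * ticks / max-frames / max-frames-irq registers 0x18 bytes after the
 * previous vector's block.
 */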
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}
	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
	else
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
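/* The thresholds programmed above tell the chip how far a producer ring may
 * drain before it asks the host to replenish: roughly one eighth of the
 * configured pending count, never more than half of the on-chip BD cache.
 */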
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);
	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}
	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}
	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
				val = TG3_RX_STD_MAX_SIZE_5700;
			else
				val = TG3_RX_STD_MAX_SIZE_5717;
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);
	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32))
			tg3_write_mem(tp, i, 0);
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs.  The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
		tw32(MSGINT_MODE, val);
	}
	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing required */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);
	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8744 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
8745 err
= tg3_load_5701_a0_firmware_fix(tp
);
8750 if (tg3_flag(tp
, TSO_CAPABLE
)) {
8751 err
= tg3_load_tso_firmware(tp
);
8756 tp
->tx_mode
= TX_MODE_ENABLE
;
8758 if (tg3_flag(tp
, 5755_PLUS
) ||
8759 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
8760 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
8762 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8763 val
= TX_MODE_JMB_FRM_LEN
| TX_MODE_CNT_DN_MODE
;
8764 tp
->tx_mode
&= ~val
;
8765 tp
->tx_mode
|= tr32(MAC_TX_MODE
) & val
;
8768 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
8771 if (tg3_flag(tp
, ENABLE_RSS
)) {
8773 u32 reg
= MAC_RSS_INDIR_TBL_0
;
8775 if (tp
->irq_cnt
== 2) {
8776 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
+= 8) {
8783 while (i
< TG3_RSS_INDIR_TBL_SIZE
) {
8784 val
= i
% (tp
->irq_cnt
- 1);
8786 for (; i
% 8; i
++) {
8788 val
|= (i
% (tp
->irq_cnt
- 1));
		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
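	/* The ten key registers above supply the 40-byte key consumed by
	 * the RSS flow hash; any fixed value works here since the spread
	 * of flows over rings only needs to be stable, not secret.
	 */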
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}
	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.speed = tp->link_config.orig_speed;
			tp->link_config.duplex = tp->link_config.orig_duplex;
			tp->link_config.autoneg = tp->link_config.orig_autoneg;
		}

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);
	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:
	default:
		break;
	}
	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
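/* TG3_STAT_ADD32 folds the chip's 32-bit statistics counters into
 * 64-bit (high/low) software accumulators.  Unsigned wrap of the low
 * word is detected by the post-add "(PSTAT)->low < __val" test, which
 * then carries one into the high word.  Worked example: low =
 * 0xffffff00 plus __val = 0x200 wraps to 0x100; 0x100 < 0x200, so
 * high is incremented.
 */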
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}

	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tw32_mailbox(tnapi->int_mbox,
					     tnapi->last_tag << 24);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
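/* Driver watchdog, run from tp->timer while the device is up.  It
 * nudges chips whose non-tagged IRQ status is race prone, polls link
 * state on parts without reliable link-change interrupts, refreshes
 * statistics once per second, and feeds the ASF heartbeat described
 * below.
 */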
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tg3_flag_set(tp, RESTART_TIMER);
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}
	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}
	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}
	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
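/* Select the top-half handler for one vector: MSI/MSI-X handlers are
 * registered unshared (and may use the one-shot variant), while legacy
 * INTx falls back to a shared handler, tagged-status-aware where the
 * chip supports it.
 */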
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
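/* MSI-X sizing below: one rx ring per online CPU plus one extra
 * vector, since vector 0 only handles link and other events; the
 * total is capped at tp->irq_max, and a smaller grant from the host
 * is retried with the granted count.
 */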
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--)
				free_irq(tnapi->irq_vec, tnapi);
			break;
		}
	}

	if (err)
		goto err_out2;

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);

	return err;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
						 struct rtnl_link_stats64 *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)
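/* ESTAT_ADD() parallels TG3_STAT_ADD32: the hardware counters are
 * lost across chip resets, so each ethtool counter is rebuilt as the
 * snapshot saved in tg3_close() plus the current hardware value.
 */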
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;

	return stats;
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
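/* The routine above is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320), computed bit by bit; it only ever runs over 6-byte
 * multicast addresses, so a table-driven variant would likely buy
 * little here.
 */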
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
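/* Multicast filtering scheme used below: each address is hashed with
 * calc_crc(), the low 7 bits of the inverted CRC select one of the
 * 128 hash-filter bits, bits 5-6 pick the MAC_HASH_REG_* register and
 * bits 0-4 the bit within it.
 */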
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	ering->rx_mini_max_pending = 0;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	ering->rx_mini_pending = 0;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 24) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else
				data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
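/* Worked example of the mask scheme above (illustrative): for an entry
 * with read_mask 0x0000ffff and write_mask 0xffff0000, the low 16 bits
 * are read-only and must survive both the all-zeros and the all-ones
 * write unchanged, while the high 16 bits must read back exactly what
 * was written (0x0000 and then 0xffff).  A register that fails either
 * check is reported via the offset in the netdev_err() above.
 */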
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
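/* The three patterns presumably cover the common stuck-bit failure
 * modes: all zeros and all ones catch bits stuck high or low, while
 * 0xaa55a55a alternates adjacent bits and nibbles, which should also
 * expose shorted or cross-coupled data lines.
 */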
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1
#define TG3_TSO_LOOPBACK	2

#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
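/* Layout of tg3_tso_header, for reference: the first two bytes are the
 * Ethernet type (0x0800, IPv4), followed by a 20-byte IPv4 header
 * (10.0.0.1 -> 10.0.0.2, protocol TCP) and a 20-byte TCP header plus
 * 12 bytes of options (NOP, NOP, timestamp).  This matches the
 * TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + TG3_TSO_TCP_OPT_LEN total
 * used as hdr_len in tg3_run_loopback() below.
 */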
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.  Also, the MAC loopback test is deprecated for
		 * all newer ASIC revisions.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		    tg3_flag(tp, CPMU_PRESENT))
			return 0;

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		if (!tg3_flag(tp, 5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else {
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
	}
	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (loopback_mode == TG3_TSO_LOOPBACK) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));

		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);
	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (loopback_mode != TG3_TSO_LOOPBACK) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4

#define TG3_MAC_LOOPBACK_SHIFT		0
#define TG3_PHY_LOOPBACK_SHIFT		4
#define TG3_LOOPBACK_FAILED		0x00000077
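/* The loopback result is a small bitmask: bits 0-2 (at
 * TG3_MAC_LOOPBACK_SHIFT) hold the standard/jumbo/TSO failures seen in
 * MAC loopback, and bits 4-6 (at TG3_PHY_LOOPBACK_SHIFT) hold the same
 * three failures for PHY loopback.  TG3_LOOPBACK_FAILED (0x77) is
 * simply all six failure bits set at once.
 */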
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 eee_cap, cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	err = tg3_reset_hw(tp, 1);
	if (err) {
		err = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* Turn off gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

	if (tg3_flag(tp, CPMU_PRESENT)) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER) {
			err = TG3_LOOPBACK_FAILED;
			goto done;
		}

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
		err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
	    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
		err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, CPMU_PRESENT)) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
			err |= TG3_STD_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
			err |= TG3_TSO_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
			err |= TG3_JMB_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
	}

	/* Re-enable gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}

	return 0;
}
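/* From user space these knobs map onto the standard ethtool coalescing
 * interface, e.g. (illustrative): "ethtool -C eth0 rx-usecs 50
 * rx-frames 32" sets ec->rx_coalesce_usecs and
 * ec->rx_max_coalesced_frames above.  The checks reject a
 * configuration where both members of an rx or tx pair are zero,
 * since the chip would then never raise that interrupt.
 */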
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
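/* Example of the wrap-around probe above (illustrative): with a 16KB
 * part, reads at 0x10, 0x20, ... return ordinary data, and the read at
 * 0x4000 aliases back to offset 0 and returns the magic signature, so
 * cursize (and thus tp->nvram_size) ends up as 0x4000.  This assumes
 * the signature value does not happen to occur at one of the probed
 * offsets.
 */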
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
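/* Concrete example of the byte shuffling described above
 * (illustrative): if the two bytes at NVRAM offset 0xf2 encode a size
 * of 512KB (little-endian 0x0200 = 512), the swab16() of the low 16
 * bits of the register-convention read recovers 512, and the size
 * becomes 512 * 1024 bytes.
 */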
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
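/* Page sizes of 264 and 528 bytes are the Atmel DataFlash
 * "power-of-two plus overhead" geometries; only those parts need the
 * linear-offset-to-page/byte address translation that
 * NO_NVRAM_ADDR_TRANS suppresses.  E.g. with 264-byte pages a linear
 * offset of 1000 would presumably become page 3, byte 208 in the
 * device's native addressing.
 */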
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
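/* The per-page sequence above follows the usual serial-flash protocol:
 * read-modify-write a whole page into "tmp", issue WREN (write
 * enable), ERASE the target page, issue WREN again, then stream the
 * page out one dword at a time with NVRAM_CMD_FIRST/LAST framing the
 * burst.  The trailing WRDI (write disable) drops write permission
 * again whether or not the loop succeeded.
 */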
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
12877 tp
->phy_id
= TG3_PHY_ID_INVALID
;
12878 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12880 /* Assume an onboard device and WOL capable by default. */
12881 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
12882 tg3_flag_set(tp
, WOL_CAP
);
12884 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
12885 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
12886 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
12887 tg3_flag_set(tp
, IS_NIC
);
12889 val
= tr32(VCPU_CFGSHDW
);
12890 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
12891 tg3_flag_set(tp
, ASPM_WORKAROUND
);
12892 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
12893 (val
& VCPU_CFGSHDW_WOL_MAGPKT
)) {
12894 tg3_flag_set(tp
, WOL_ENABLE
);
12895 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
12900 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
12901 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
12902 u32 nic_cfg
, led_cfg
;
12903 u32 nic_phy_id
, ver
, cfg2
= 0, cfg4
= 0, eeprom_phy_id
;
12904 int eeprom_phy_serdes
= 0;
12906 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
12907 tp
->nic_sram_data_cfg
= nic_cfg
;
12909 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
12910 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
12911 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
12912 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
12913 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
&&
12914 (ver
> 0) && (ver
< 0x100))
12915 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
12917 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
12918 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_4
, &cfg4
);
12920 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
12921 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
12922 eeprom_phy_serdes
= 1;
12924 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
12925 if (nic_phy_id
!= 0) {
12926 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
12927 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
12929 eeprom_phy_id
= (id1
>> 16) << 10;
12930 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
12931 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
12935 tp
->phy_id
= eeprom_phy_id
;
12936 if (eeprom_phy_serdes
) {
12937 if (!tg3_flag(tp
, 5705_PLUS
))
12938 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
12940 tp
->phy_flags
|= TG3_PHYFLG_MII_SERDES
;
12943 if (tg3_flag(tp
, 5750_PLUS
))
12944 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
12945 SHASTA_EXT_LED_MODE_MASK
);
12947 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
12951 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
12952 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12955 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
12956 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
12959 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
12960 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
12962 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12963 * read on some older 5700/5701 bootcode.
12965 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
12967 GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
12969 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12973 case SHASTA_EXT_LED_SHARED
:
12974 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
12975 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
12976 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A1
)
12977 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
12978 LED_CTRL_MODE_PHY_2
);
12981 case SHASTA_EXT_LED_MAC
:
12982 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
12985 case SHASTA_EXT_LED_COMBO
:
12986 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
12987 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
)
12988 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
12989 LED_CTRL_MODE_PHY_2
);
12994 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
12995 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) &&
12996 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
12997 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
12999 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
)
13000 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
13002 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
) {
13003 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
13004 if ((tp
->pdev
->subsystem_vendor
==
13005 PCI_VENDOR_ID_ARIMA
) &&
13006 (tp
->pdev
->subsystem_device
== 0x205a ||
13007 tp
->pdev
->subsystem_device
== 0x2063))
13008 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
13010 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
13011 tg3_flag_set(tp
, IS_NIC
);
13014 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
13015 tg3_flag_set(tp
, ENABLE_ASF
);
13016 if (tg3_flag(tp
, 5750_PLUS
))
13017 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
13020 if ((nic_cfg
& NIC_SRAM_DATA_CFG_APE_ENABLE
) &&
13021 tg3_flag(tp
, 5750_PLUS
))
13022 tg3_flag_set(tp
, ENABLE_APE
);
13024 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
&&
13025 !(nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
))
13026 tg3_flag_clear(tp
, WOL_CAP
);
13028 if (tg3_flag(tp
, WOL_CAP
) &&
13029 (nic_cfg
& NIC_SRAM_DATA_CFG_WOL_ENABLE
)) {
13030 tg3_flag_set(tp
, WOL_ENABLE
);
13031 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
13034 if (cfg2
& (1 << 17))
13035 tp
->phy_flags
|= TG3_PHYFLG_CAPACITIVE_COUPLING
;
13037 /* serdes signal pre-emphasis in register 0x590 set by */
13038 /* bootcode if bit 18 is set */
13039 if (cfg2
& (1 << 18))
13040 tp
->phy_flags
|= TG3_PHYFLG_SERDES_PREEMPHASIS
;
13042 if ((tg3_flag(tp
, 57765_PLUS
) ||
13043 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
13044 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
)) &&
13045 (cfg2
& NIC_SRAM_DATA_CFG_2_APD_EN
))
13046 tp
->phy_flags
|= TG3_PHYFLG_ENABLE_APD
;
13048 if (tg3_flag(tp
, PCI_EXPRESS
) &&
13049 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
13050 !tg3_flag(tp
, 57765_PLUS
)) {
13053 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &cfg3
);
13054 if (cfg3
& NIC_SRAM_ASPM_DEBOUNCE
)
13055 tg3_flag_set(tp
, ASPM_WORKAROUND
);
13058 if (cfg4
& NIC_SRAM_RGMII_INBAND_DISABLE
)
13059 tg3_flag_set(tp
, RGMII_INBAND_DISABLE
);
13060 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_RX_EN
)
13061 tg3_flag_set(tp
, RGMII_EXT_IBND_RX_EN
);
13062 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_TX_EN
)
13063 tg3_flag_set(tp
, RGMII_EXT_IBND_TX_EN
);
13066 if (tg3_flag(tp
, WOL_CAP
))
13067 device_set_wakeup_enable(&tp
->pdev
->dev
,
13068 tg3_flag(tp
, WOL_ENABLE
));
13070 device_set_wakeup_capable(&tp
->pdev
->dev
, false);
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
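/* Example of the merge above (illustrative): if the first (top-half)
 * read returns 0xXXXXaaaa and the second (bottom-half) read returns
 * 0xbbbbYYYY, the function returns 0xaaaabbbb - the 16 bits on either
 * side of the 32-bit alignment boundary are stitched back together.
 */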
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg |
		  ADVERTISED_Pause;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13183 hw_phy_id_masked
= hw_phy_id
& TG3_PHY_ID_MASK
;
13186 if (!err
&& TG3_KNOWN_PHY_ID(hw_phy_id_masked
)) {
13187 tp
->phy_id
= hw_phy_id
;
13188 if (hw_phy_id_masked
== TG3_PHY_ID_BCM8002
)
13189 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
13191 tp
->phy_flags
&= ~TG3_PHYFLG_PHY_SERDES
;
13193 if (tp
->phy_id
!= TG3_PHY_ID_INVALID
) {
13194 /* Do nothing, phy ID already set up in
13195 * tg3_get_eeprom_hw_cfg().
13198 struct subsys_tbl_ent
*p
;
13200 /* No eeprom signature? Try the hardcoded
13201 * subsys device table.
13203 p
= tg3_lookup_by_subsys(tp
);
13207 tp
->phy_id
= p
->phy_id
;
13209 tp
->phy_id
== TG3_PHY_ID_BCM8002
)
13210 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
13214 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
13215 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
13216 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
||
13217 (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
&&
13218 tp
->pci_chip_rev_id
!= CHIPREV_ID_5717_A0
) ||
13219 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
&&
13220 tp
->pci_chip_rev_id
!= CHIPREV_ID_57765_A0
)))
13221 tp
->phy_flags
|= TG3_PHYFLG_EEE_CAP
;
13223 tg3_phy_init_link_config(tp
);
13225 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
13226 !tg3_flag(tp
, ENABLE_APE
) &&
13227 !tg3_flag(tp
, ENABLE_ASF
)) {
13230 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
13231 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
13232 (bmsr
& BMSR_LSTATUS
))
13233 goto skip_phy_reset
;
13235 err
= tg3_phy_reset(tp
);
13239 tg3_phy_set_wirespeed(tp
);
13241 mask
= (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
13242 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
13243 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
);
13244 if (!tg3_copper_is_advertising_all(tp
, mask
)) {
13245 tg3_phy_autoneg_cfg(tp
, tp
->link_config
.advertising
,
13246 tp
->link_config
.flowctrl
);
13248 tg3_writephy(tp
, MII_BMCR
,
13249 BMCR_ANENABLE
| BMCR_ANRESTART
);
13254 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
13255 err
= tg3_init_5401phy_dsp(tp
);
13259 err
= tg3_init_5401phy_dsp(tp
);
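
/* Worked example of the PHY ID assembly, with made-up register values:
 * if MII_PHYSID1 returned 0x0020 and MII_PHYSID2 returned 0x60b0, then
 *   hw_phy_id  = (0x0020 & 0xffff) << 10 = 0x00008000
 *   hw_phy_id |= (0x60b0 & 0xfc00) << 16 = 0x60000000
 *   hw_phy_id |= (0x60b0 & 0x03ff) << 0  = 0x000000b0
 * giving 0x600080b0, which is then masked with TG3_PHY_ID_MASK before
 * the TG3_KNOWN_PHY_ID() table check.
 */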

static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}

static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
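
/* Usage note: pci_dev_present() scans the system against this table, so
 * tg3_get_invariants() below can cheaply ask "is one of these
 * write-reordering host bridges installed?" and, if so, enable the
 * mailbox write read-back workaround.
 */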

static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tg3_flag_set(tp, 5780_CLASS);
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);

	/* Determine TSO capabilities */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
		tg3_flag_set(tp, TSO_CAPABLE);
	else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	if (tg3_flag(tp, 5755_PLUS))
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_flag_set(tp, 4K_FIFO_LIMIT);

	if (tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		tp->pcie_readrq = 4096;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tp->pcie_readrq = 2048;

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_flag(tp, PCIX_MODE)) {
		pci_read_config_dword(tp->pdev,
				      tp->pcix_cap + PCI_X_STATUS, &val);
		tp->pci_fn = val & 0x7;
	} else {
		tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, CPMU_PRESENT);

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = TG3_DEF_MAC_MODE;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return 0;
}
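
/* Decoding note, assuming tg3.h's usual encoding (GET_ASIC_REV(id) ==
 * id >> 12 and GET_CHIP_REV(id) == id >> 8): a chip rev id such as
 * CHIPREV_ID_5703_A1 (0x1001 in that scheme) maps to chip rev 0x10
 * (CHIPREV_5703_AX) and ASIC rev 0x1 (ASIC_REV_5703), which is why the
 * function above can test the hardware at three levels of granularity.
 */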

#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif

static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
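
/* Worked example of the mailbox decode, with made-up SRAM contents: if
 * hi = 0x484b0a0b and lo = 0x0c0d0e0f, the 0x484b ("HK") marker in the
 * top half of hi validates the entry, and the extraction yields the
 * MAC address 0a:0b:0c:0d:0e:0f -- two bytes from hi, four from lo.
 */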

#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
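
/* For illustration (hypothetical machine): on a sparc64 box (goal ==
 * BOUNDARY_SINGLE_CACHELINE) whose PCI_CACHE_LINE_SIZE byte reads 0x10,
 * cacheline_size works out to 16 * 4 = 64, so the plain-PCI switch
 * above lands in the 64-byte case and ORs DMA_RWCTRL_READ_BNDRY_64 |
 * DMA_RWCTRL_WRITE_BNDRY_64 into val.
 */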

static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size,
				     int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
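
/* Usage sketch: tg3_test_dma() below drives this helper twice -- once
 * with to_device == 1 (host buffer -> NIC SRAM through the read-DMA
 * engine) and once with to_device == 0 (NIC SRAM -> host buffer through
 * the write-DMA engine) -- each time polling the completion FIFO for
 * the descriptor address to confirm the transfer finished.
 */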

#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
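
/* Format a human-readable description of the bus the NIC sits on
 * (PCI Express, PCI-X with its decoded clock, or legacy PCI) plus the
 * bus width into the caller-supplied buffer; the caller must supply
 * room for the longest string produced here (tg3_init_one() uses a
 * 40-byte buffer).
 */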
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}
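
/* Populate tp->coal with the driver's default interrupt-coalescing
 * parameters; these are the values later reported and adjusted through
 * the ethtool get/set coalesce interface.
 */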
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
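
/* Hooks exported to the core networking stack via dev->netdev_ops. */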
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
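
/* PCI probe entry point.  Order matters here: the device must be
 * enabled and its registers mapped before tg3_get_invariants() can
 * identify the chip, the DMA mask depends on those invariants, and the
 * DMA engine is tested last, just before the netdev is registered.
 */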
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;
	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
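
/* System suspend/resume, wired up through the dev_pm_ops below.  Only
 * compiled in when CONFIG_PM_SLEEP is enabled; otherwise TG3_PM_OPS is
 * NULL and the PCI core skips the PM callbacks entirely.
 */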
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
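
/* PCI error-recovery (AER) callbacks and driver registration glue. */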
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);