/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
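
/* Usage sketch (illustrative): tg3_flag(tp, TAGGED_STATUS) expands to
 * _tg3_flag(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags), so every flag name
 * used with these macros must have a matching TG3_FLAG_* enumerator in
 * enum TG3_FLAGS in tg3.h.
 */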
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		118
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"April 22, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
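
/* Worked example: TG3_TX_RING_SIZE is 512, a power of two, so
 * NEXT_TX(511) == 512 & 511 == 0 -- the producer index wraps back to the
 * start of the ring with a single AND instead of a hardware modulo.
 */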
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
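
/* e.g. with the default tx_pending of TG3_DEF_TX_RING_PENDING (511),
 * the stalled queue is woken once 511 / 4 = 127 descriptors are free.
 */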
#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
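
/* Load-time usage (illustrative; the bit values come from the NETIF_MSG_*
 * constants in netdevice.h): "modprobe tg3 tg3_debug=0x7" would enable the
 * DRV, PROBE and LINK message classes, while the default of -1 selects
 * TG3_DEF_MSG_ENABLE above.
 */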
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
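
/* The device table above is what lets userspace (udev/modprobe) match PCI
 * vendor/device IDs to this module and autoload it; adding support for a
 * new chip starts with a new PCI_DEVICE() entry here.
 */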
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "mbuf_lwm_thresh_hit" },

	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },

	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) {
		/* Non-posted methods */
		tp->write32(tp, off, val);
	} else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
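
/* Usage note: tw32_wait_f(reg, val, 100) (defined below) routes here with
 * usec_wait == 100, giving e.g. GPIO or clock transitions 100 usec to
 * settle; tw32_f() is the zero-wait flush variant.
 */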
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
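
/* These shorthands deliberately reference a variable named "tp" at the
 * call site, so they only compile inside functions that have a local
 * struct tg3 *tp in scope -- the convention followed throughout this file.
 */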
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
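
/* Each MI transaction below polls up to PHY_BUSY_LOOPS times with a short
 * udelay() per iteration, so a wedged PHY access is bounded to roughly
 * tens of milliseconds before -EBUSY is returned.
 */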
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
*tp
, int reg
, u32 val
)
860 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
861 (reg
== MII_TG3_CTRL
|| reg
== MII_TG3_AUX_CTRL
))
864 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
866 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
870 frame_val
= ((tp
->phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
871 MI_COM_PHY_ADDR_MASK
);
872 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
873 MI_COM_REG_ADDR_MASK
);
874 frame_val
|= (val
& MI_COM_DATA_MASK
);
875 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
877 tw32_f(MAC_MI_COM
, frame_val
);
879 loops
= PHY_BUSY_LOOPS
;
882 frame_val
= tr32(MAC_MI_COM
);
883 if ((frame_val
& MI_COM_BUSY
) == 0) {
885 frame_val
= tr32(MAC_MI_COM
);
895 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
896 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
903 static int tg3_phy_cl45_write(struct tg3
*tp
, u32 devad
, u32 addr
, u32 val
)
907 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
911 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
915 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
916 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
920 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, val
);
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}
static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
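
/* Both helpers return the tg3_writephy() status; callers below use the
 * pattern "if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { ...DSP writes...;
 * TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); }" so DSP registers are only touched
 * when the shadow enable actually succeeded.
 */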
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
*bp
)
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
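
/* Note: phy_mask above excludes every address except TG3_PHY_MII_ADDR, so
 * mdiobus_register() probes only the one expected PHY and leaves anything
 * else on the bus untouched.
 */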
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	val = 0;
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
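
/* Resolution sketch of the logic above (follows the IEEE 802.3 pause
 * negotiation rules; only the cases the code distinguishes are shown):
 *
 *   local PAUSE/ASYM   remote PAUSE/ASYM   resolved flow control
 *        1    x             1    x         TX and RX
 *        1    1             0    1         RX only
 *        0    1             1    1         TX only
 *        otherwise                         none
 */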
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
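
/* Lifecycle sketch: tg3_phy_init()/tg3_phy_fini() bracket the phylib
 * attachment (phy_connect/phy_disconnect), while tg3_phy_start()/
 * tg3_phy_stop() are the lighter-weight pair used around device
 * start/stop and power transitions.
 */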
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		switch (val) {
		case TG3_CL45_D7_EEERES_STAT_LP_1000T:
			switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
			case ASIC_REV_57765:
				if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
					tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
							 0x0000);
					TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
				}
			}
			/* Fallthrough */
		case TG3_CL45_D7_EEERES_STAT_LP_100TX:
			tp->setlpicnt = 2;
		}
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
static void tg3_frob_aux_power(struct tg3 *tp)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
	    tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if (tg3_flag(tp_peer, WOL_ENABLE) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
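/* Quiesce the PHY ahead of a power-state transition.  Serdes, 5906
 * (internal EPHY) and FET-style PHYs each get their own sequence below;
 * a plain BMCR_PDOWN is only issued on chips not excluded by the
 * bug-workaround checks near the end.
 */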
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
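/* tg3_nvram_lock()/tg3_nvram_unlock() nest via tp->nvram_lock_cnt, so a
 * rough usage pattern (illustrative only) is:
 *
 *	if (tg3_nvram_lock(tp) == 0) {
 *		... touch NVRAM registers ...
 *		tg3_nvram_unlock(tp);
 *	}
 *
 * The hardware arbitration itself is the SWARB request/grant handshake
 * polled above.
 */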
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
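/* A whole-word read is issued by combining the command bits in one shot,
 * as tg3_nvram_read() below does:
 *
 *	tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *			   NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
 *
 * The loop above then polls NVRAM_CMD_DONE for up to NVRAM_CMD_TIMEOUT
 * iterations of roughly 10us each.
 */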
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
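/* Minimal usage sketch (illustrative): fetching the first NVRAM word in
 * bytestream order, independent of host endianness:
 *
 *	__be32 magic;
 *
 *	if (tg3_nvram_read_be32(tp, 0, &magic) == 0)
 *		... inspect be32_to_cpu(magic) ...
 */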
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		    TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
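/* Register packing example: for MAC address 00:10:18:aa:bb:cc the code
 * above writes addr_high = 0x0010 (bytes 0-1) and addr_low = 0x18aabbcc
 * (bytes 2-5), replicated across the four MAC_ADDR_x slots and, on
 * 5703/5704, the twelve extended slots.
 */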
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	tg3_enable_register_access(tp);

	pci_set_power_state(tp->pdev, PCI_D0);

	/* Switch out of Vaux if it is a NIC */
	if (tg3_flag(tp, IS_NIC))
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

	return 0;
}
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					    SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
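/* Power-off ordering: tg3_power_down_prepare() quiesces the MAC/PHY and
 * arms the WOL state, then the device is allowed to wake from D3 only
 * when WOL_ENABLE is set, and finally dropped to PCI_D3hot.
 */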
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
					 u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
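/* Example: an aux-status value whose speed field decodes as
 * MII_TG3_AUX_STAT_100FULL yields *speed = SPEED_100 and
 * *duplex = DUPLEX_FULL; anything unrecognized falls back to the FET
 * bits or SPEED_INVALID/DUPLEX_INVALID.
 */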
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		u32 val;

		tw32(TG3_CPMU_EEE_MODE,
		     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

		TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
			/* Fall through */
		case ASIC_REV_5719:
			val = MII_TG3_DSP_TAP26_ALNOKO |
			      MII_TG3_DSP_TAP26_RMRXSTO |
			      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		}

		val = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Advertise 100-BaseTX EEE ability */
			if (tp->link_config.advertising &
			    ADVERTISED_100baseT_Full)
				val |= MDIO_AN_EEE_ADV_100TX;
			/* Advertise 1000-BaseT EEE ability */
			if (tp->link_config.advertising &
			    ADVERTISED_1000baseT_Full)
				val |= MDIO_AN_EEE_ADV_1000T;
		}
		tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);

		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
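/* Example: a mask of ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Full
 * builds all_mask = ADVERTISE_100FULL | ADVERTISE_10FULL, and the
 * function returns nonzero only if MII_ADVERTISE (and, for gigabit
 * capable PHYs, MII_TG3_CTRL) already carries every one of those bits.
 */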
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tg3_flag(tp, PAUSE_AUTONEG))
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
							      &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
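/* Rough state flow of the software 1000BASE-X autoneg below (modeled on
 * IEEE 802.3 clause 37): AN_ENABLE -> RESTART -> ABILITY_DETECT ->
 * ACK_DETECT -> COMPLETE_ACK -> IDLE_DETECT -> LINK_OK, restarting from
 * AN_ENABLE whenever the partner's config words disappear or change.
 */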
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
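/* On success *txflags holds our transmitted config word (ANEG_CFG_*)
 * and *rxflags the MR_* result flags, which the caller translates into
 * ADVERTISE_1000X and LPA_1000X pause bits for tg3_setup_flow_control()
 * -- see tg3_setup_fiber_by_hand() below.
 */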
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
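
/* Illustrative note (added commentary, not from the original source):
 * because dst is first advanced by off bytes, a call such as
 * tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0) copies 0x4f0 bytes of
 * registers starting at offset MAC_MODE into the dump buffer at that
 * same byte offset, so every register lands at its natural position.
 */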
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
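
/* Worked example (added commentary, not from the original source): with
 * tx_pending = 511, tx_prod = 10 and tx_cons = 500 on a 512-entry ring,
 * (10 - 500) & 511 = 22 descriptors are still in flight, so
 * 511 - 22 = 489 slots remain available.  The mask makes the
 * subtraction correct across producer wrap-around.
 */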
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->skb)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(ri->skb);
	ri->skb = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
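
/* Note on the copy-count math above (added commentary): cpycnt starts as
 * the number of filled entries between consumer and producer, then is
 * clamped so a single memcpy never wraps past the end of either ring.
 * For example, with a 512-entry ring, cons_idx = 510 and prod_idx = 4,
 * the first pass copies 512 - 510 = 2 entries; the indices wrap and the
 * next loop iteration copies the remaining 4.
 */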
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	schedule_work(&tp->reset_task);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tg3_flag(tp, RESTART_TIMER);
	tg3_flag_clear(tp, RESTART_TIMER);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	schedule_work(&tp->reset_task);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
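
/* Worked example (added commentary): a 100-byte buffer mapped at DMA
 * address 0xffffffa0 trips the test above: the base exceeds 0xffffdcc0
 * and 0xffffffa0 + 100 + 8 overflows 32 bits, so base + len + 8 < base
 * and the function returns true, steering the caller into the
 * 4GB-boundary workaround path.
 */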
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);

/* Work around 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff *skb, u32 last_plus_one,
				       u32 *start, u32 base_flags, u32 mss)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			ret = -1;
			dev_kfree_skb(new_skb);

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		} else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
			   tg3_4g_overflow_test(new_addr, new_skb->len)) {
			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
					 PCI_DMA_TODEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
		} else {
			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(&tnapi->tx_buffers[entry],
						mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tnapi->tx_buffers[entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   new_addr);
		} else {
			tnapi->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
			dma_addr_t mapping, int len, u32 flags,
			u32 mss_and_is_end)
{
	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
	int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
	u32 vlan_tag = 0;

	if (is_end)
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		vlan_tag = flags >> 16;
		flags &= 0xffff;
	}
	vlan_tag |= (mss << TXD_MSS_SHIFT);

	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
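
/* Encoding note (added commentary): callers pack the last argument as
 * mss_and_is_end = (is_end ? 1 : 0) | (mss << 1), e.g.
 *
 *	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
 *		    (i == last) | (mss << 1));
 *
 * Bit 0 marks the final fragment of the packet; the remaining bits
 * carry the TSO MSS.
 */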
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLAG_HW_TSO_2 and TG3_FLAG_HW_TSO_3 only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int i, last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		int tcp_opt_len, ip_tcp_len;
		u32 hdrlen;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_is_gso_v6(skb)) {
			hdrlen = skb_headlen(skb) - ETH_HLEN;
		} else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			hdrlen = ip_tcp_len + tcp_opt_len;
		}

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdrlen & 0xc) << 12;
			if (hdrlen & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdrlen & 0x3e0) << 5;
		} else
			mss |= hdrlen << 9;

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	}

	if (vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));

	len = skb_headlen(skb);

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);

			tg3_set_txd(tnapi, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;

dma_error:
	last = i;
	entry = tnapi->tx_prod;
	tnapi->tx_buffers[entry].skb = NULL;
	pci_unmap_single(tp->pdev,
			 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 0; i <= last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		entry = NEXT_TX(entry);

		pci_unmap_page(tp->pdev,
			       dma_unmap_addr(&tnapi->tx_buffers[entry],
					      mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
					  struct net_device *);

/* Use GSO to work around a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
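
/* Note on frag_cnt_est above (added commentary): gso_segs * 3 is a rough
 * worst-case bound on descriptor usage, presumably allowing each
 * resulting segment a linear-data descriptor plus a couple of fragment
 * descriptors; the queue is stopped up front if that many free slots
 * cannot be guaranteed.
 */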
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int i, last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		if (skb_is_gso_v6(skb)) {
			hdr_len = skb_headlen(skb) - ETH_HLEN;
		} else {
			u32 ip_tcp_len;

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			hdr_len = ip_tcp_len + tcp_opt_len;

			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		would_hit_hwbug = 1;

	if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
	    tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
	    tg3_40bit_overflow_test(tp, mapping, len))
		would_hit_hwbug = 1;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

			if (tg3_flag(tp, SHORT_DMA_BUG) &&
			    len <= 8)
				would_hit_hwbug = 1;

			if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
			    tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
			    tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tg3_flag(tp, HW_TSO_1) ||
			    tg3_flag(tp, HW_TSO_2) ||
			    tg3_flag(tp, HW_TSO_3))
				tg3_set_txd(tnapi, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tnapi, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;

dma_error:
	last = i;
	entry = tnapi->tx_prod;
	tnapi->tx_buffers[entry].skb = NULL;
	pci_unmap_single(tp->pdev,
			 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 0; i <= last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		entry = NEXT_TX(entry);

		pci_unmap_page(tp->pdev,
			       dma_unmap_addr(&tnapi->tx_buffers[entry],
					      mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static void tg3_set_loopback(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		/*
		 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
		 * loopback mode if Half-Duplex mode was negotiated earlier.
		 */
		tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;

		/* Enable internal MAC loopback mode */
		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		spin_lock_bh(&tp->lock);
		tw32(MAC_MODE, tp->mac_mode);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		/* Disable internal MAC loopback mode */
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
		spin_lock_bh(&tp->lock);
		tw32(MAC_MODE, tp->mac_mode);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static u32 tg3_fix_features(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static int tg3_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; ) {
			struct ring_info *txp;
			struct sk_buff *skb;
			unsigned int k;

			txp = &tnapi->tx_buffers[i];
			skb = txp->skb;

			if (skb == NULL) {
				i++;
				continue;
			}

			pci_unmap_single(tp->pdev,
					 dma_unmap_addr(txp, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);
			txp->skb = NULL;

			i++;

			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
				pci_unmap_page(tp->pdev,
					       dma_unmap_addr(txp, mapping),
					       skb_shinfo(skb)->frags[k].size,
					       PCI_DMA_TODEVICE);
				i++;
			}

			dev_kfree_skb_any(skb);
		}
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
						    TG3_TX_RING_SIZE,
						    GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							    &tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
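
/*
 * Note (descriptive): per-vector resource ownership follows the TSS/RSS
 * layout chosen above.  With TSS enabled, vector 0 gets no tx ring and
 * every other vector gets one; with RSS enabled, vector 0 gets no rx
 * return ring (rx_rcb) and fields only link and error events.
 */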
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
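
/*
 * Note (descriptive): tg3_abort_hw() OR-accumulates the per-block results
 * rather than returning on first failure, so every engine gets its stop
 * attempt even when an earlier block times out; callers pass silent=1
 * when a partially dead chip is expected.
 */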
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
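
/*
 * Handshake note (descriptive): tg3_write_sig_pre_reset() deposits
 * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in the firmware mailbox before reset;
 * bootcode acknowledges completion by writing back the one's complement
 * of that value, which is what the polling loop above waits for.
 */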
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
static void tg3_stop_fw(struct tg3 *);

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN |
			       MAC_MODE_APE_RX_EN |
			       MAC_MODE_TDE_ENABLE;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};
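
/*
 * Layout sketch of the firmware blobs consumed below (illustrative,
 * inferred from the parsing in tg3_load_5701_a0_firmware_fix() and
 * tg3_load_tso_firmware(); not a normative format description):
 *
 *   fw->data[0]   version word
 *   fw->data[1]   fw_base: address the image expects to run from
 *   fw->data[2]   fw_len:  end_address_of_bss - start_address_of_text
 *   fw->data[3..] image words, loaded contiguously from fw_base
 */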
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
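
/*
 * Note (descriptive): a firmware CPU is started by pointing CPU_PC at
 * fw_base and then clearing CPU_MODE_HALT; the retry loop above re-halts
 * and re-writes the PC a few times because the write does not always
 * take on the first attempt.
 */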
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
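
/*
 * Layout sketch of one TG3_BDINFO control block in NIC SRAM, per the
 * TG3_BDINFO_* offsets used above (a descriptive aid; the authoritative
 * values live in tg3.h):
 *
 *   +TG3_BDINFO_HOST_ADDR     64-bit host DMA address of the ring
 *   +TG3_BDINFO_MAXLEN_FLAGS  (max frame len << 16) | ring flags
 *   +TG3_BDINFO_NIC_ADDR      location of the ring copy in NIC SRAM
 */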
static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
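
/*
 * Note (descriptive): the per-vector host-coalescing registers are laid
 * out at a fixed 0x18 byte stride, so vector n+1's copy of a register
 * lives at the VEC1 address plus n * 0x18, as computed above; unused
 * vector slots are explicitly zeroed by the trailing loop.
 */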
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
	else
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
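
/*
 * Worked example (illustrative only): with the default
 * TG3_DEF_RX_RING_PENDING of 200, host_rep_thresh = max(200 / 8, 1) = 25,
 * so the NIC asks the host to replenish standard ring buffers once 25 are
 * consumed, unless the BD-cache-derived nic_rep_thresh is smaller.
 */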
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;
	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
				val = TG3_RX_STD_MAX_SIZE_5700;
			else
				val = TG3_RX_STD_MAX_SIZE_5717;
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);
	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
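
	/*
	 * Worked example (illustrative): for a standard 1500 byte MTU the
	 * register value is 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
	 * VLAN_HLEN (4) = 1522 bytes, the classic maximum VLAN-tagged frame.
	 */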
	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, HW_TSO_3) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
8523 if (tg3_flag(tp
, 5750_PLUS
)) {
8524 val
= tr32(RCVLPC_STATS_ENABLE
);
8525 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
8526 tw32(RCVLPC_STATS_ENABLE
, val
);
8527 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
8528 tg3_flag(tp
, TSO_CAPABLE
)) {
8529 val
= tr32(RCVLPC_STATS_ENABLE
);
8530 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
8531 tw32(RCVLPC_STATS_ENABLE
, val
);
8533 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
8535 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
8536 tw32(SNDDATAI_STATSENAB
, 0xffffff);
8537 tw32(SNDDATAI_STATSCTRL
,
8538 (SNDDATAI_SCTRL_ENABLE
|
8539 SNDDATAI_SCTRL_FASTUPD
));
	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32))
			tg3_write_mem(tp, i, 0);
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
		tw32(MSGINT_MODE, val);
	}
	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);
	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}
	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 reg = MAC_RSS_INDIR_TBL_0;
		u8 *ent = (u8 *)&val;

		/* Setup the indirection table */
		for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
			int idx = i % sizeof(val);

			ent[idx] = i % (tp->irq_cnt - 1);
			if (idx == sizeof(val) - 1) {
				tw32(reg, val);
				reg += 4;
			}
		}

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}
	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.speed = tp->link_config.orig_speed;
			tp->link_config.duplex = tp->link_config.orig_duplex;
			tp->link_config.autoneg = tp->link_config.orig_autoneg;
		}

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);
	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:
	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
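
/* The MAC statistics counters are only 32 bits wide and wrap silently.
 * TG3_STAT_ADD32 below folds a freshly read counter register into the
 * driver's 64-bit high/low shadow counter: if the low word becomes
 * smaller than the value just added, the addition wrapped, so a carry
 * is propagated into the high word.
 */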
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
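
/* Driver watchdog, re-armed from its own tail.  With non-tagged IRQ
 * status it nudges the chip every tick to work around the race-prone
 * mailbox/status-block handshake; once per second it fetches the
 * hardware statistics and polls link state, and every two seconds it
 * sends the ASF heartbeat described in the comment block further down.
 */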
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tg3_flag_set(tp, RESTART_TIMER);
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt) {
			u32 val = tr32(TG3_CPMU_EEE_MODE);
			tw32(TG3_CPMU_EEE_MODE,
			     val | TG3_CPMU_EEEMD_LPI_ENABLE);
		}

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
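
/* Verify that interrupt delivery actually works: swap in a minimal test
 * ISR, force a coalescing event through HOSTCC_MODE, and poll the
 * interrupt mailbox (and MISC_HOST_CTRL) for evidence that the vector
 * fired before restoring the normal handler.
 */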
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
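
/* Try to switch the device to MSI-X.  Returns true only when the
 * vectors were granted and the real RX/TX queue counts could be set;
 * any failure returns false so that tg3_ints_init() falls back to
 * plain MSI.
 */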
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
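
/* Interrupt setup policy: prefer MSI-X (multiple vectors, which also
 * enables RSS), then single-vector MSI, then legacy INTx.  MSI is
 * refused when the chip lacks tagged status, since all MSI-capable
 * chips are expected to support it.
 */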
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
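
/* Bring the device up: load any needed firmware, power up the chip,
 * set up interrupt vectors (which determines how many NAPI/ring
 * resources to allocate), program the hardware, start the watchdog
 * timer, and only then enable interrupts.  MSI delivery is verified
 * with tg3_test_msi() and quietly downgraded to INTx if it fails.
 */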
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--)
				free_irq(tnapi->irq_vec, tnapi);
			break;
		}
	}

	if (err)
		goto err_out2;

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tg3_flag(tp, TAGGED_STATUS))
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	return err;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
						 struct rtnl_link_stats64 *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
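
/* On 5700/5701 with a copper PHY the MAC's rx_fcs_errors counter is not
 * used; instead the PHY's CRC error counter is read (and re-armed via
 * MII_TG3_TEST1_CRC_EN) and accumulated in tp->phy_crc_errors.  All
 * other configurations report the MAC statistic directly.
 */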
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
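
/* Each ethtool statistic is the counter value saved at the last close
 * (estats_prev) plus whatever the hardware has accumulated since the
 * chip was re-initialized; ESTAT_ADD expands to exactly that sum for
 * one struct member.
 */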
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;

	return stats;
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
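
/* The multicast filter is a 128-bit hash: __tg3_set_rx_mode() below
 * runs each address through calc_crc(), uses the low 7 bits of the
 * inverted CRC to pick one of 128 bits, and programs the result into
 * the four 32-bit MAC_HASH_REG registers (tg3_set_multi() simply sets
 * or clears all of them).
 */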
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}

static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}

static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	ering->rx_mini_max_pending = 0;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	ering->rx_mini_pending = 0;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
static __be32 * tg3_vpd_readblock(struct tg3 *tp)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
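
/* NVRAM self-test: read the image back, then verify whichever integrity
 * scheme the magic number implies - a simple byte checksum for selfboot
 * format 1 images, per-byte odd parity for hardware selfboot images, or
 * CRC32 over the bootstrap and manufacturing blocks (plus the VPD
 * checksum keyword) for full legacy images.
 */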
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0)
			err = 0;

		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;
		int l;
		u8 msk;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 24) {
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else {
				data[j++] = buf8[i];
			}
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
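
/* The usable internal SRAM windows differ per ASIC generation, so the
 * memory test walks a chip-specific table of {offset, len} ranges,
 * each terminated by an 0xffffffff sentinel, running the pattern test
 * above over every range.
 */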
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1
#define TG3_TSO_LOOPBACK	2

#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
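
/* The template above is an Ethertype (0x0800) followed by a 20-byte IPv4
 * header and a 20-byte TCP header carrying 12 bytes of options; the TSO
 * loopback path patches the real total length into the IP header and hands
 * the MSS to the hardware through the transmit descriptor flags.
 */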
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.  Also, the MAC loopback test is deprecated for
		 * all newer ASIC revisions.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		    tg3_flag(tp, CPMU_PRESENT))
			return 0;

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		if (!tg3_flag(tp, 5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else {
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
	}

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (loopback_mode == TG3_TSO_LOOPBACK) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
		    base_flags, (mss << 1) | 1);

	tnapi->tx_prod++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (loopback_mode != TG3_TSO_LOOPBACK) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT == 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4

#define TG3_MAC_LOOPBACK_SHIFT		0
#define TG3_PHY_LOOPBACK_SHIFT		4
#define TG3_LOOPBACK_FAILED		0x00000077
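
/* Worked example of the packing above: a return value of 0x21 means the
 * standard-MTU MAC loopback (0x01 = TG3_STD_LOOPBACK_FAILED << 0) and the
 * jumbo PHY loopback (0x20 = TG3_JMB_LOOPBACK_FAILED << 4) both failed;
 * 0x77 (TG3_LOOPBACK_FAILED) marks every test in both nibbles as failed.
 */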
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 eee_cap, cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	err = tg3_reset_hw(tp, 1);
	if (err) {
		err = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* Turn off gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

	if (tg3_flag(tp, CPMU_PRESENT)) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER) {
			err = TG3_LOOPBACK_FAILED;
			goto done;
		}

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
		err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
	    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
		err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, CPMU_PRESENT)) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
			err |= TG3_STD_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
			err |= TG3_TSO_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
			err |= TG3_JMB_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
	}

	/* Re-enable gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_up(tp);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
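
/* Note: "ethtool -t ethX offline" sets ETH_TEST_FL_OFFLINE and exercises
 * every branch above; data[0]..data[5] carry the nvram, link, register,
 * memory, loopback and interrupt results in that order, matching the
 * test-name strings the driver reports through tg3_get_strings().
 */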
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
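
/* These bounds surface through the standard coalescing ioctl, e.g.
 * (illustrative interface name):
 *
 *	ethtool -C eth0 rx-usecs 50 rx-frames 32
 *
 * which arrives here as ec->rx_coalesce_usecs and
 * ec->rx_max_coalesced_frames.
 */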
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
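
/* The 264- and 528-byte page sizes correspond to Atmel AT45DB-style
 * DataFlash parts; the NO_NVRAM_ADDR_TRANS checks in the later
 * tg3_get_*_nvram_info() routines key off exactly these two sizes.
 */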
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
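
/* Round-trip sketch of the swab32()/be32_to_cpu() dance above, worked on a
 * buffer holding the bytes aa bb cc dd: be32_to_cpu() yields 0xaabbccdd on
 * any CPU, and swab32() stores 0xddccbbaa in GRC_EEPROM_DATA, which is the
 * byte order the SEEPROM state machine expects.  (Worked example only.)
 */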
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	ret = 0;
	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
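
/* NVRAM_CMD_FIRST/NVRAM_CMD_LAST bracket a burst: FIRST goes out on the
 * first dword of a page (or of the transfer) and LAST on the final dword of
 * a page or of the whole write.  A 12-byte write wholly inside one page
 * therefore issues FIRST, plain, then LAST commands for its three dwords.
 */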
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
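
/* Merge example: if the reads return thalf_otp = 0xXXXXaaaa and
 * bhalf_otp = 0xbbbbYYYY, the function returns 0xaaaabbbb: the low half of
 * the first word becomes the high half of the result, and vice versa.
 */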
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg |
		  ADVERTISED_Pause;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
    u32 hw_phy_id_1, hw_phy_id_2;
    u32 hw_phy_id, hw_phy_id_masked;
    int err;

    /* flow control autonegotiation is default behavior */
    tg3_flag_set(tp, PAUSE_AUTONEG);
    tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

    if (tg3_flag(tp, USE_PHYLIB))
        return tg3_phy_init(tp);

    /* Reading the PHY ID register can conflict with ASF
     * firmware access to the PHY hardware.
     */
    err = 0;
    if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
        hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
    } else {
        /* Now read the physical PHY_ID from the chip and verify
         * that it is sane.  If it doesn't look good, we fall back
         * to either the hard-coded table based PHY_ID and failing
         * that the value found in the eeprom area.
         */
        err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
        err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

        hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
        hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
        hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

        hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
    }

    if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
        tp->phy_id = hw_phy_id;
        if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
            tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
        else
            tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
    } else {
        if (tp->phy_id != TG3_PHY_ID_INVALID) {
            /* Do nothing, phy ID already set up in
             * tg3_get_eeprom_hw_cfg().
             */
        } else {
            struct subsys_tbl_ent *p;

            /* No eeprom signature?  Try the hardcoded
             * subsys device table.
             */
            p = tg3_lookup_by_subsys(tp);
            if (!p)
                return -ENODEV;

            tp->phy_id = p->phy_id;
            if (!tp->phy_id ||
                tp->phy_id == TG3_PHY_ID_BCM8002)
                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
        }
    }

    if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
        ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
          tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
         (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
          tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
        tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

    tg3_phy_init_link_config(tp);

    if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
        !tg3_flag(tp, ENABLE_APE) &&
        !tg3_flag(tp, ENABLE_ASF)) {
        u32 bmsr, adv_reg, tg3_ctrl, mask;

        tg3_readphy(tp, MII_BMSR, &bmsr);
        if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
            (bmsr & BMSR_LSTATUS))
            goto skip_phy_reset;

        err = tg3_phy_reset(tp);
        if (err)
            return err;

        adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                   ADVERTISE_100HALF | ADVERTISE_100FULL |
                   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
        tg3_ctrl = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
            tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
                        MII_TG3_CTRL_ADV_1000_FULL);
            if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
                             MII_TG3_CTRL_ENABLE_AS_MASTER);
        }

        mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
                ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
                ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
        if (!tg3_copper_is_advertising_all(tp, mask)) {
            tg3_writephy(tp, MII_ADVERTISE, adv_reg);

            if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

            tg3_writephy(tp, MII_BMCR,
                         BMCR_ANENABLE | BMCR_ANRESTART);
        }
        tg3_phy_set_wirespeed(tp);

        tg3_writephy(tp, MII_ADVERTISE, adv_reg);
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
            tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
    }

skip_phy_reset:
    if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
        err = tg3_init_5401phy_dsp(tp);
        if (err)
            return err;

        err = tg3_init_5401phy_dsp(tp);
    }

    return err;
}
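/* The probe above tries three sources for the PHY ID, in order: the
 * MII_PHYSID1/MII_PHYSID2 registers (skipped when ASF/APE firmware may own
 * the PHY), the ID already planted by tg3_get_eeprom_hw_cfg(), and finally
 * the hardcoded subsystem-ID table.  TG3_PHY_ID_MASK strips the revision
 * bits before the assembled ID is matched against the known-PHY table.
 */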
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
    u8 *vpd_data;
    unsigned int block_end, rosize, len;
    int j, i = 0;

    vpd_data = (u8 *)tg3_vpd_readblock(tp);
    if (!vpd_data)
        goto out_no_vpd;

    i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
                         PCI_VPD_LRDT_RO_DATA);
    if (i < 0)
        goto out_not_found;

    rosize = pci_vpd_lrdt_size(&vpd_data[i]);
    block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
    i += PCI_VPD_LRDT_TAG_SIZE;

    if (block_end > TG3_NVM_VPD_LEN)
        goto out_not_found;

    j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                  PCI_VPD_RO_KEYWORD_MFR_ID);
    if (j > 0) {
        len = pci_vpd_info_field_size(&vpd_data[j]);

        j += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (j + len > block_end || len != 4 ||
            memcmp(&vpd_data[j], "1028", 4))
            goto partno;

        j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_VENDOR0);
        if (j < 0)
            goto partno;

        len = pci_vpd_info_field_size(&vpd_data[j]);

        j += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (j + len > block_end)
            goto partno;

        memcpy(tp->fw_ver, &vpd_data[j], len);
        strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
    }

partno:
    i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                  PCI_VPD_RO_KEYWORD_PARTNO);
    if (i < 0)
        goto out_not_found;

    len = pci_vpd_info_field_size(&vpd_data[i]);

    i += PCI_VPD_INFO_FLD_HDR_SIZE;
    if (len > TG3_BPN_SIZE ||
        (len + i) > TG3_NVM_VPD_LEN)
        goto out_not_found;

    memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
    kfree(vpd_data);
    if (tp->board_part_number[0])
        return;

out_no_vpd:
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
        if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
            strcpy(tp->board_part_number, "BCM5717");
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
            strcpy(tp->board_part_number, "BCM5718");
        else
            goto nomatch;
    } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
        if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
            strcpy(tp->board_part_number, "BCM57780");
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
            strcpy(tp->board_part_number, "BCM57760");
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
            strcpy(tp->board_part_number, "BCM57790");
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
            strcpy(tp->board_part_number, "BCM57788");
        else
            goto nomatch;
    } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
        if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
            strcpy(tp->board_part_number, "BCM57761");
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
            strcpy(tp->board_part_number, "BCM57765");
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
            strcpy(tp->board_part_number, "BCM57781");
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
            strcpy(tp->board_part_number, "BCM57785");
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
            strcpy(tp->board_part_number, "BCM57791");
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
            strcpy(tp->board_part_number, "BCM57795");
        else
            goto nomatch;
    } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
        strcpy(tp->board_part_number, "BCM95906");
    } else {
nomatch:
        strcpy(tp->board_part_number, "none");
    }
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
    u32 val;

    if (tg3_nvram_read(tp, offset, &val) ||
        (val & 0xfc000000) != 0x0c000000 ||
        tg3_nvram_read(tp, offset + 4, &val) ||
        (val & 0xe0000000) != 0)
        return 0;

    return 1;
}
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
    u32 val, offset, start, ver_offset;
    int i, dst_off;
    bool newver = false;

    if (tg3_nvram_read(tp, 0xc, &offset) ||
        tg3_nvram_read(tp, 0x4, &start))
        return;

    offset = tg3_nvram_logical_addr(tp, offset);

    if (tg3_nvram_read(tp, offset, &val))
        return;

    if ((val & 0xfc000000) == 0x0c000000) {
        if (tg3_nvram_read(tp, offset + 4, &val))
            return;

        if (val == 0)
            newver = true;
    }

    dst_off = strlen(tp->fw_ver);

    if (newver) {
        if (TG3_VER_SIZE - dst_off < 16 ||
            tg3_nvram_read(tp, offset + 8, &ver_offset))
            return;

        offset = offset + ver_offset - start;

        for (i = 0; i < 16; i += 4) {
            __be32 v;
            if (tg3_nvram_read_be32(tp, offset + i, &v))
                return;

            memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
        }
    } else {
        u32 major, minor;

        if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
            return;

        major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
                TG3_NVM_BCVER_MAJSFT;
        minor = ver_offset & TG3_NVM_BCVER_MINMSK;
        snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
                 "v%d.%02d", major, minor);
    }
}
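/* Two bootcode version encodings are handled above: images whose header
 * word matches the 0x0c000000 signature under the 0xfc000000 mask and have
 * a zero second word carry a 16-byte version string at a header-relative
 * offset, while older images keep a packed major/minor word at
 * TG3_NVM_PTREV_BCVER instead.
 */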
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
    u32 val, major, minor;

    /* Use native endian representation */
    if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
        return;

    major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
            TG3_NVM_HWSB_CFG1_MAJSFT;
    minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
            TG3_NVM_HWSB_CFG1_MINSFT;

    snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
    u32 offset, major, minor, build;

    strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

    if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
        return;

    switch (val & TG3_EEPROM_SB_REVISION_MASK) {
    case TG3_EEPROM_SB_REVISION_0:
        offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
        break;
    case TG3_EEPROM_SB_REVISION_2:
        offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
        break;
    case TG3_EEPROM_SB_REVISION_3:
        offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
        break;
    case TG3_EEPROM_SB_REVISION_4:
        offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
        break;
    case TG3_EEPROM_SB_REVISION_5:
        offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
        break;
    case TG3_EEPROM_SB_REVISION_6:
        offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
        break;
    default:
        return;
    }

    if (tg3_nvram_read(tp, offset, &val))
        return;

    build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
            TG3_EEPROM_SB_EDH_BLD_SHFT;
    major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
            TG3_EEPROM_SB_EDH_MAJ_SHFT;
    minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

    if (minor > 99 || build > 26)
        return;

    offset = strlen(tp->fw_ver);
    snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
             " v%d.%02d", major, minor);

    if (build > 0) {
        offset = strlen(tp->fw_ver);
        if (offset < TG3_VER_SIZE - 1)
            tp->fw_ver[offset] = 'a' + build - 1;
    }
}
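/* Selfboot version strings come out as "sb vM.NN" plus, for nonzero build
 * numbers, a single trailing letter ('a' for build 1 through 'z' for
 * build 26); the minor > 99 / build > 26 check above rejects values that
 * would not fit this encoding.
 */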
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
    u32 val, offset, start;
    int i, vlen;

    for (offset = TG3_NVM_DIR_START;
         offset < TG3_NVM_DIR_END;
         offset += TG3_NVM_DIRENT_SIZE) {
        if (tg3_nvram_read(tp, offset, &val))
            return;

        if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
            break;
    }

    if (offset == TG3_NVM_DIR_END)
        return;

    if (!tg3_flag(tp, 5705_PLUS))
        start = 0x08000000;
    else if (tg3_nvram_read(tp, offset - 4, &start))
        return;

    if (tg3_nvram_read(tp, offset + 4, &offset) ||
        !tg3_fw_img_is_valid(tp, offset) ||
        tg3_nvram_read(tp, offset + 8, &val))
        return;

    offset += val - start;

    vlen = strlen(tp->fw_ver);

    tp->fw_ver[vlen++] = ',';
    tp->fw_ver[vlen++] = ' ';

    for (i = 0; i < 4; i++) {
        __be32 v;
        if (tg3_nvram_read_be32(tp, offset, &v))
            return;

        offset += sizeof(v);

        if (vlen > TG3_VER_SIZE - sizeof(v)) {
            memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
            break;
        }

        memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
        vlen += sizeof(v);
    }
}
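/* The loop above scans fixed-size NVRAM directory entries between
 * TG3_NVM_DIR_START and TG3_NVM_DIR_END for an ASF-init image, then
 * translates its load address into an NVRAM offset to pull a 16-byte
 * version string, clamped so the ", " separator and the text still fit
 * inside tp->fw_ver.
 */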
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
    int vlen;
    u32 apedata;
    char *fwtype;

    if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
        return;

    apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
    if (apedata != APE_SEG_SIG_MAGIC)
        return;

    apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
    if (!(apedata & APE_FW_STATUS_READY))
        return;

    apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

    if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
        tg3_flag_set(tp, APE_HAS_NCSI);
        fwtype = "NCSI";
    } else {
        fwtype = "DASH";
    }

    vlen = strlen(tp->fw_ver);

    snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
             fwtype,
             (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
             (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
             (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
             (apedata & APE_FW_VERSION_BLDMSK));
}
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
    u32 val;
    bool vpd_vers = false;

    if (tp->fw_ver[0] != 0)
        vpd_vers = true;

    if (tg3_flag(tp, NO_NVRAM)) {
        strcat(tp->fw_ver, "sb");
        return;
    }

    if (tg3_nvram_read(tp, 0, &val))
        return;

    if (val == TG3_EEPROM_MAGIC)
        tg3_read_bc_ver(tp);
    else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
        tg3_read_sb_ver(tp, val);
    else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
        tg3_read_hwsb_ver(tp);
    else
        return;

    if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
        goto done;

    tg3_read_mgmtfw_ver(tp);

done:
    tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
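/* tp->fw_ver is built up incrementally: a VPD-supplied string first (which
 * suppresses the management-firmware lookup), then the bootcode or selfboot
 * version, then optionally the ASF management firmware version;
 * tg3_read_dash_ver() appends the APE DASH/NCSI version separately.  The
 * final store above guarantees NUL termination regardless of how much fit.
 */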
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
    if (tg3_flag(tp, LRG_PROD_RING_CAP))
        return TG3_RX_RET_MAX_SIZE_5717;
    else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
        return TG3_RX_RET_MAX_SIZE_5700;
    else
        return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
    { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
    { },
};
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
    u32 misc_ctrl_reg;
    u32 pci_state_reg, grc_misc_cfg;
    u32 val;
    u16 pci_cmd;
    int err;

    /* Force memory write invalidate off.  If we leave it on,
     * then on 5700_BX chips we have to enable a workaround.
     * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
     * to match the cacheline size.  The Broadcom driver have this
     * workaround but turns MWI off all the times so never uses
     * it.  This seems to suggest that the workaround is insufficient.
     */
    pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
    pci_cmd &= ~PCI_COMMAND_INVALIDATE;
    pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

    /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
     * has the register indirect write enable bit set before
     * we try to access any of the MMIO registers.  It is also
     * critical that the PCI-X hw workaround situation is decided
     * before that as well.
     */
    pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                          &misc_ctrl_reg);

    tp->pci_chip_rev_id = (misc_ctrl_reg >>
                           MISC_HOST_CTRL_CHIPREV_SHIFT);
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
        u32 prod_id_asic_rev;

        if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
            pci_read_config_dword(tp->pdev,
                                  TG3PCI_GEN2_PRODID_ASICREV,
                                  &prod_id_asic_rev);
        else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
                 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
                 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
                 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
                 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
                 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
            pci_read_config_dword(tp->pdev,
                                  TG3PCI_GEN15_PRODID_ASICREV,
                                  &prod_id_asic_rev);
        else
            pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
                                  &prod_id_asic_rev);

        tp->pci_chip_rev_id = prod_id_asic_rev;
    }

    /* Wrong chip ID in 5752 A0. This code can be removed later
     * as A0 is not in production.
     */
    if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
        tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
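
    /* Note: for older devices the chip revision lives in the upper bits of
     * TG3PCI_MISC_HOST_CTRL; parts that report ASIC_REV_USE_PROD_ID_REG
     * there instead expose the real ID in a product-ID register whose
     * config-space location depends on the device generation, which is
     * what the chain above selects on PCI device ID.
     */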
    /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
     * we need to disable memory and use config. cycles
     * only to access all registers. The 5702/03 chips
     * can mistakenly decode the special cycles from the
     * ICH chipsets as memory write cycles, causing corruption
     * of register and memory space. Only certain ICH bridges
     * will drive special cycles with non-zero data during the
     * address phase which can fall within the 5703's address
     * range. This is not an ICH bug as the PCI spec allows
     * non-zero address during special cycles. However, only
     * these ICH bridges are known to drive non-zero addresses
     * during special cycles.
     *
     * Since special cycles do not cross PCI bridges, we only
     * enable this workaround if the 5703 is on the secondary
     * bus of these ICH bridges.
     */
    if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
        (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
        static struct tg3_dev_id {
            u32 vendor;
            u32 device;
            u32 rev;
        } ich_chipsets[] = {
            { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
              PCI_ANY_ID },
            { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
              PCI_ANY_ID },
            { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
              0xa },
            { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
              PCI_ANY_ID },
            { },
        };
        struct tg3_dev_id *pci_id = &ich_chipsets[0];
        struct pci_dev *bridge = NULL;

        while (pci_id->vendor != 0) {
            bridge = pci_get_device(pci_id->vendor, pci_id->device,
                                    bridge);
            if (!bridge) {
                pci_id++;
                continue;
            }
            if (pci_id->rev != PCI_ANY_ID) {
                if (bridge->revision > pci_id->rev)
                    continue;
            }
            if (bridge->subordinate &&
                (bridge->subordinate->number ==
                 tp->pdev->bus->number)) {
                tg3_flag_set(tp, ICH_WORKAROUND);
                pci_dev_put(bridge);
                break;
            }
        }
    }
    if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
        static struct tg3_dev_id {
            u32 vendor;
            u32 device;
        } bridge_chipsets[] = {
            { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
            { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
            { },
        };
        struct tg3_dev_id *pci_id = &bridge_chipsets[0];
        struct pci_dev *bridge = NULL;

        while (pci_id->vendor != 0) {
            bridge = pci_get_device(pci_id->vendor,
                                    pci_id->device,
                                    bridge);
            if (!bridge) {
                pci_id++;
                continue;
            }
            if (bridge->subordinate &&
                (bridge->subordinate->number <=
                 tp->pdev->bus->number) &&
                (bridge->subordinate->subordinate >=
                 tp->pdev->bus->number)) {
                tg3_flag_set(tp, 5701_DMA_BUG);
                pci_dev_put(bridge);
                break;
            }
        }
    }
    /* The EPB bridge inside 5714, 5715, and 5780 cannot support
     * DMA addresses > 40-bit. This bridge may have other additional
     * 57xx devices behind it in some 4-port NIC designs for example.
     * Any tg3 device found behind the bridge will also need the 40-bit
     * DMA workaround.
     */
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
        tg3_flag_set(tp, 5780_CLASS);
        tg3_flag_set(tp, 40BIT_DMA_BUG);
        tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
    } else {
        struct pci_dev *bridge = NULL;

        do {
            bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
                                    PCI_DEVICE_ID_SERVERWORKS_EPB,
                                    bridge);
            if (bridge && bridge->subordinate &&
                (bridge->subordinate->number <=
                 tp->pdev->bus->number) &&
                (bridge->subordinate->subordinate >=
                 tp->pdev->bus->number)) {
                tg3_flag_set(tp, 40BIT_DMA_BUG);
                pci_dev_put(bridge);
                break;
            }
        } while (bridge);
    }
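
    /* Note: the 40BIT_DMA_BUG flag set above is consumed later when the
     * DMA mask is chosen: devices in the 5780 class, or any tg3 sitting
     * behind a ServerWorks EPB bridge, must not be handed DMA addresses
     * above 40 bits even where the chip itself is 64-bit capable.
     */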
    /* Initialize misc host control in PCI block. */
    tp->misc_host_ctrl |= (misc_ctrl_reg &
                           MISC_HOST_CTRL_CHIPREV);
    pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                           tp->misc_host_ctrl);

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
        tp->pdev_peer = tg3_find_peer(tp);

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
        tg3_flag_set(tp, 5717_PLUS);

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
        tg3_flag(tp, 5717_PLUS))
        tg3_flag_set(tp, 57765_PLUS);

    /* Intentionally exclude ASIC_REV_5906 */
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
        tg3_flag(tp, 57765_PLUS))
        tg3_flag_set(tp, 5755_PLUS);

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
        tg3_flag(tp, 5755_PLUS) ||
        tg3_flag(tp, 5780_CLASS))
        tg3_flag_set(tp, 5750_PLUS);

    if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
        tg3_flag(tp, 5750_PLUS))
        tg3_flag_set(tp, 5705_PLUS);
    /* 5700 B0 chips do not support checksumming correctly due
     * to hardware bugs.
     */
    if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
        u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

        if (tg3_flag(tp, 5755_PLUS))
            features |= NETIF_F_IPV6_CSUM;
        tp->dev->features |= features;
        tp->dev->hw_features |= features;
        tp->dev->vlan_features |= features;
    }
    /* Determine TSO capabilities */
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
        ; /* Do nothing. HW bug. */
    else if (tg3_flag(tp, 57765_PLUS))
        tg3_flag_set(tp, HW_TSO_3);
    else if (tg3_flag(tp, 5755_PLUS) ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
        tg3_flag_set(tp, HW_TSO_2);
    else if (tg3_flag(tp, 5750_PLUS)) {
        tg3_flag_set(tp, HW_TSO_1);
        tg3_flag_set(tp, TSO_BUG);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
            tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
            tg3_flag_clear(tp, TSO_BUG);
    } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
               GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
               tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
        tg3_flag_set(tp, TSO_BUG);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
            tp->fw_needed = FIRMWARE_TG3TSO5;
        else
            tp->fw_needed = FIRMWARE_TG3TSO;
    }
    tp->irq_max = 1;

    if (tg3_flag(tp, 5750_PLUS)) {
        tg3_flag_set(tp, SUPPORT_MSI);
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
             tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
             tp->pdev_peer == tp->pdev))
            tg3_flag_clear(tp, SUPPORT_MSI);

        if (tg3_flag(tp, 5755_PLUS) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
            tg3_flag_set(tp, 1SHOT_MSI);
        }

        if (tg3_flag(tp, 57765_PLUS)) {
            tg3_flag_set(tp, SUPPORT_MSIX);
            tp->irq_max = TG3_IRQ_MAX_VECS;
        }
    }
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
        tg3_flag_set(tp, SHORT_DMA_BUG);
    else if (!tg3_flag(tp, 5755_PLUS)) {
        tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
        tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
    }

    if (tg3_flag(tp, 5717_PLUS))
        tg3_flag_set(tp, LRG_PROD_RING_CAP);

    if (tg3_flag(tp, 57765_PLUS) &&
        GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
        tg3_flag_set(tp, USE_JUMBO_BDFLAG);

    if (!tg3_flag(tp, 5705_PLUS) ||
        tg3_flag(tp, 5780_CLASS) ||
        tg3_flag(tp, USE_JUMBO_BDFLAG))
        tg3_flag_set(tp, JUMBO_CAPABLE);
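
    /* Note: the *_PLUS flags form a strict hierarchy -- 5717_PLUS implies
     * 57765_PLUS, which implies 5755_PLUS, which implies 5750_PLUS, which
     * implies 5705_PLUS.  Code elsewhere tests the weakest flag that
     * covers the hardware feature it cares about.
     */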
    pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
                          &pci_state_reg);

    tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
    if (tp->pcie_cap != 0) {
        u16 lnkctl;

        tg3_flag_set(tp, PCI_EXPRESS);

        tp->pcie_readrq = 4096;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
            tp->pcie_readrq = 2048;

        pcie_set_readrq(tp->pdev, tp->pcie_readrq);

        pci_read_config_word(tp->pdev,
                             tp->pcie_cap + PCI_EXP_LNKCTL,
                             &lnkctl);
        if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
            if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_clear(tp, HW_TSO_2);
            if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
                tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
                tg3_flag_set(tp, CLKREQ_BUG);
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
            tg3_flag_set(tp, L1PLLPD_EN);
        }
    } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
        tg3_flag_set(tp, PCI_EXPRESS);
    } else if (!tg3_flag(tp, 5705_PLUS) ||
               tg3_flag(tp, 5780_CLASS)) {
        tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
        if (!tp->pcix_cap) {
            dev_err(&tp->pdev->dev,
                    "Cannot find PCI-X capability, aborting\n");
            return -EIO;
        }

        if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
            tg3_flag_set(tp, PCIX_MODE);
    }
    /* If we have an AMD 762 or VIA K8T800 chipset, write
     * reordering to the mailbox registers done by the host
     * controller can cause major troubles.  We read back from
     * every mailbox register write to force the writes to be
     * posted to the chip in order.
     */
    if (pci_dev_present(tg3_write_reorder_chipsets) &&
        !tg3_flag(tp, PCI_EXPRESS))
        tg3_flag_set(tp, MBOX_WRITE_REORDER);

    pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                         &tp->pci_cacheline_sz);
    pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                         &tp->pci_lat_timer);
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
        tp->pci_lat_timer < 64) {
        tp->pci_lat_timer = 64;
        pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                              tp->pci_lat_timer);
    }

    if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
        /* 5700 BX chips need to have their TX producer index
         * mailboxes written twice to workaround a bug.
         */
        tg3_flag_set(tp, TXD_MBOX_HWBUG);
        /* If we are in PCI-X mode, enable register write workaround.
         *
         * The workaround is to use indirect register accesses
         * for all chip writes not to mailbox registers.
         */
        if (tg3_flag(tp, PCIX_MODE)) {
            u32 pm_reg;

            tg3_flag_set(tp, PCIX_TARGET_HWBUG);

            /* The chip can have it's power management PCI config
             * space registers clobbered due to this bug.
             * So explicitly force the chip into D0 here.
             */
            pci_read_config_dword(tp->pdev,
                                  tp->pm_cap + PCI_PM_CTRL,
                                  &pm_reg);
            pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
            pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
            pci_write_config_dword(tp->pdev,
                                   tp->pm_cap + PCI_PM_CTRL,
                                   pm_reg);

            /* Also, force SERR#/PERR# in PCI command. */
            pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
            pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
            pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
        }
    }

    if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
        tg3_flag_set(tp, PCI_HIGH_SPEED);
    if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
        tg3_flag_set(tp, PCI_32BIT);

    /* Chip-specific fixup from Broadcom driver */
    if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
        (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
        pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
    }
    /* Default fast path register access methods */
    tp->read32 = tg3_read32;
    tp->write32 = tg3_write32;
    tp->read32_mbox = tg3_read32;
    tp->write32_mbox = tg3_write32;
    tp->write32_tx_mbox = tg3_write32;
    tp->write32_rx_mbox = tg3_write32;

    /* Various workaround register access methods */
    if (tg3_flag(tp, PCIX_TARGET_HWBUG))
        tp->write32 = tg3_write_indirect_reg32;
    else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
             (tg3_flag(tp, PCI_EXPRESS) &&
              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
        /*
         * Back to back register writes can cause problems on these
         * chips, the workaround is to read back all reg writes
         * except those to mailbox regs.
         *
         * See tg3_write_indirect_reg32().
         */
        tp->write32 = tg3_write_flush_reg32;
    }

    if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
        tp->write32_tx_mbox = tg3_write32_tx_mbox;
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
            tp->write32_rx_mbox = tg3_write_flush_reg32;
    }

    if (tg3_flag(tp, ICH_WORKAROUND)) {
        tp->read32 = tg3_read_indirect_reg32;
        tp->write32 = tg3_write_indirect_reg32;
        tp->read32_mbox = tg3_read_indirect_mbox;
        tp->write32_mbox = tg3_write_indirect_mbox;
        tp->write32_tx_mbox = tg3_write_indirect_mbox;
        tp->write32_rx_mbox = tg3_write_indirect_mbox;

        iounmap(tp->regs);
        tp->regs = NULL;

        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_cmd &= ~PCI_COMMAND_MEMORY;
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
    }

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
        tp->read32_mbox = tg3_read32_mbox_5906;
        tp->write32_mbox = tg3_write32_mbox_5906;
        tp->write32_tx_mbox = tg3_write32_mbox_5906;
        tp->write32_rx_mbox = tg3_write32_mbox_5906;
    }

    if (tp->write32 == tg3_write_indirect_reg32 ||
        (tg3_flag(tp, PCIX_MODE) &&
         (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
        tg3_flag_set(tp, SRAM_USE_CONFIG);
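
    /* Note: accessor selection above is layered -- fast direct MMIO is the
     * default, PCIX_TARGET_HWBUG or flush-after-write chips override the
     * plain write path, the ICH workaround forces everything through
     * config-space indirection (and unmaps the BAR), and the 5906 then
     * swaps in its own mailbox accessors on top of whatever was chosen.
     */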
    /* Get eeprom hw config before calling tg3_set_power_state().
     * In particular, the TG3_FLAG_IS_NIC flag must be
     * determined before calling tg3_set_power_state() so that
     * we know whether or not to switch out of Vaux power.
     * When the flag is set, it means that GPIO1 is used for eeprom
     * write protect and also implies that it is a LOM where GPIOs
     * are not used to switch power.
     */
    tg3_get_eeprom_hw_cfg(tp);

    if (tg3_flag(tp, ENABLE_APE)) {
        /* Allow reads and writes to the
         * APE register and memory space.
         */
        pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                         PCISTATE_ALLOW_APE_SHMEM_WR |
                         PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
                               pci_state_reg);
    }

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
        tg3_flag(tp, 57765_PLUS))
        tg3_flag_set(tp, CPMU_PRESENT);
    /* Set up tp->grc_local_ctrl before calling tg3_power_up().
     * GPIO1 driven high will bring 5700's external PHY out of reset.
     * It is also used as eeprom write protect on LOMs.
     */
    tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
    if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
        tg3_flag(tp, EEPROM_WRITE_PROT))
        tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
                               GRC_LCLCTRL_GPIO_OUTPUT1);
    /* Unused GPIO3 must be driven as output on 5752 because there
     * are no pull-up resistors on unused GPIO pins.
     */
    else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
        tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
        tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

    if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
        tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
        /* Turn off the debug UART. */
        tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
        if (tg3_flag(tp, IS_NIC))
            /* Keep VMain power. */
            tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                  GRC_LCLCTRL_GPIO_OUTPUT0;
    }

    /* Force the chip into D0. */
    err = tg3_power_up(tp);
    if (err) {
        dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
        return err;
    }
    /* Derive initial jumbo mode from MTU assigned in
     * ether_setup() via the alloc_etherdev() call
     */
    if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
        tg3_flag_set(tp, JUMBO_RING_ENABLE);

    /* Determine WakeOnLan speed to use. */
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
        tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
        tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
        tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
        tg3_flag_clear(tp, WOL_SPEED_100MB);
    } else {
        tg3_flag_set(tp, WOL_SPEED_100MB);
    }

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
        tp->phy_flags |= TG3_PHYFLG_IS_FET;

    /* A few boards don't want Ethernet@WireSpeed phy feature */
    if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
        ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
         (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
         (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
        (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
        (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
        tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

    if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
        GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
        tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
    if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
        tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
    if (tg3_flag(tp, 5705_PLUS) &&
        !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
        GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
        GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
        !tg3_flag(tp, 57765_PLUS)) {
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
            if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
                tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
                tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
            if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
                tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
        } else
            tp->phy_flags |= TG3_PHYFLG_BER_BUG;
    }
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
        GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
        tp->phy_otp = tg3_read_otp_phycfg(tp);
        if (tp->phy_otp == 0)
            tp->phy_otp = TG3_OTP_DEFAULT;
    }

    if (tg3_flag(tp, CPMU_PRESENT))
        tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
    else
        tp->mi_mode = MAC_MI_MODE_BASE;

    tp->coalesce_mode = 0;
    if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
        GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
        tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

    /* Set these bits to enable statistics workaround. */
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
        tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
        tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
        tp->coalesce_mode |= HOSTCC_MODE_ATTN;
        tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
    }

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
        tg3_flag_set(tp, USE_PHYLIB);

    err = tg3_mdio_init(tp);
    if (err)
        return err;
    /* Initialize data/descriptor byte/word swapping. */
    val = tr32(GRC_MODE);
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
        val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
                GRC_MODE_WORD_SWAP_B2HRX_DATA |
                GRC_MODE_B2HRX_ENABLE |
                GRC_MODE_HTX2B_ENABLE |
                GRC_MODE_HOST_STACKUP);
    else
        val &= GRC_MODE_HOST_STACKUP;

    tw32(GRC_MODE, val | tp->grc_mode);

    tg3_switch_clocks(tp);

    /* Clear this out for sanity. */
    tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
    pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
                          &pci_state_reg);
    if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
        !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
        u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

        if (chiprevid == CHIPREV_ID_5701_A0 ||
            chiprevid == CHIPREV_ID_5701_B0 ||
            chiprevid == CHIPREV_ID_5701_B2 ||
            chiprevid == CHIPREV_ID_5701_B5) {
            void __iomem *sram_base;

            /* Write some dummy words into the SRAM status block
             * area, see if it reads back correctly.  If the return
             * value is bad, force enable the PCIX workaround.
             */
            sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

            writel(0x00000000, sram_base);
            writel(0x00000000, sram_base + 4);
            writel(0xffffffff, sram_base + 4);
            if (readl(sram_base) != 0x00000000)
                tg3_flag_set(tp, PCIX_TARGET_HWBUG);
        }
    }

    tg3_nvram_init(tp);
);
14250 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
14251 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
14253 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
14254 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
14255 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
14256 tg3_flag_set(tp
, IS_5788
);
14258 if (!tg3_flag(tp
, IS_5788
) &&
14259 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
))
14260 tg3_flag_set(tp
, TAGGED_STATUS
);
14261 if (tg3_flag(tp
, TAGGED_STATUS
)) {
14262 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
14263 HOSTCC_MODE_CLRTICK_TXBD
);
14265 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
14266 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
14267 tp
->misc_host_ctrl
);
    /* Preserve the APE MAC_MODE bits */
    if (tg3_flag(tp, ENABLE_APE))
        tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
    else
        tp->mac_mode = TG3_DEF_MAC_MODE;

    /* these are limited to 10/100 only */
    if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
         (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
        (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
         tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
         (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
          tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
          tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
        (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
         (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
          tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
          tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
        (tp->phy_flags & TG3_PHYFLG_IS_FET))
        tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
    err = tg3_phy_probe(tp);
    if (err) {
        dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
        /* ... but do not return immediately ... */
        tg3_mdio_fini(tp);
    }

    tg3_read_vpd(tp);
    tg3_read_fw_ver(tp);

    if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
        tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
    } else {
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
            tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
        else
            tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
    }

    /* 5700 {AX,BX} chips have a broken status block link
     * change bit implementation, so we must use the
     * status register in those cases.
     */
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
        tg3_flag_set(tp, USE_LINKCHG_REG);
    else
        tg3_flag_clear(tp, USE_LINKCHG_REG);

    /* The led_ctrl is set during tg3_phy_probe, here we might
     * have to force the link status polling mechanism based
     * upon subsystem IDs.
     */
    if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
        !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
        tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
        tg3_flag_set(tp, USE_LINKCHG_REG);
    }

    /* For all SERDES we poll the MAC status register. */
    if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
        tg3_flag_set(tp, POLL_SERDES);
    else
        tg3_flag_clear(tp, POLL_SERDES);
    tp->rx_offset = NET_IP_ALIGN;
    tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
        tg3_flag(tp, PCIX_MODE)) {
        tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        tp->rx_copy_thresh = ~(u16)0;
#endif
    }

    tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
    tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
    tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

    tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

    /* Increment the rx prod index on the rx std ring by at most
     * 8 for these chips to workaround hw errata.
     */
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
        tp->rx_std_max_post = 8;

    if (tg3_flag(tp, ASPM_WORKAROUND))
        tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
                             PCIE_PWR_MGMT_L1_THRESH_MSK;

    return err;
}
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
    struct net_device *dev = tp->dev;
    struct pci_dev *pdev = tp->pdev;
    struct device_node *dp = pci_device_to_OF_node(pdev);
    const unsigned char *addr;
    int len;

    addr = of_get_property(dp, "local-mac-address", &len);
    if (addr && len == 6) {
        memcpy(dev->dev_addr, addr, 6);
        memcpy(dev->perm_addr, dev->dev_addr, 6);
        return 0;
    }
    return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
    struct net_device *dev = tp->dev;

    memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
    memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
    return 0;
}
#endif
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
    struct net_device *dev = tp->dev;
    u32 hi, lo, mac_offset;
    int addr_ok = 0;

#ifdef CONFIG_SPARC
    if (!tg3_get_macaddr_sparc(tp))
        return 0;
#endif

    mac_offset = 0x7c;
    if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
        tg3_flag(tp, 5780_CLASS)) {
        if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
            mac_offset = 0xcc;
        if (tg3_nvram_lock(tp))
            tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
        else
            tg3_nvram_unlock(tp);
    } else if (tg3_flag(tp, 5717_PLUS)) {
        if (PCI_FUNC(tp->pdev->devfn) & 1)
            mac_offset = 0xcc;
        if (PCI_FUNC(tp->pdev->devfn) > 1)
            mac_offset += 0x18c;
    } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
        mac_offset = 0x10;

    /* First try to get it from MAC address mailbox. */
    tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
    if ((hi >> 16) == 0x484b) {
        dev->dev_addr[0] = (hi >>  8) & 0xff;
        dev->dev_addr[1] = (hi >>  0) & 0xff;

        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
        dev->dev_addr[2] = (lo >> 24) & 0xff;
        dev->dev_addr[3] = (lo >> 16) & 0xff;
        dev->dev_addr[4] = (lo >>  8) & 0xff;
        dev->dev_addr[5] = (lo >>  0) & 0xff;

        /* Some old bootcode may report a 0 MAC address in SRAM */
        addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
    }
    if (!addr_ok) {
        /* Next, try NVRAM. */
        if (!tg3_flag(tp, NO_NVRAM) &&
            !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
            !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
            memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
            memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
        }
        /* Finally just fetch it out of the MAC control regs. */
        else {
            hi = tr32(MAC_ADDR_0_HIGH);
            lo = tr32(MAC_ADDR_0_LOW);

            dev->dev_addr[5] = lo & 0xff;
            dev->dev_addr[4] = (lo >> 8) & 0xff;
            dev->dev_addr[3] = (lo >> 16) & 0xff;
            dev->dev_addr[2] = (lo >> 24) & 0xff;
            dev->dev_addr[1] = hi & 0xff;
            dev->dev_addr[0] = (hi >> 8) & 0xff;
        }
    }

    if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
        if (!tg3_get_default_macaddr_sparc(tp))
            return 0;
#endif
        return -EINVAL;
    }
    memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
    return 0;
}
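/* MAC address discovery order above: the SRAM mailbox (only trusted when
 * the high word carries the 0x484b marker, ASCII "HK", written by the
 * bootcode), then NVRAM at the function-specific mac_offset, then the
 * MAC_ADDR_0 registers, and on sparc the OpenFirmware property or system
 * idprom as the first and last resorts respectively.
 */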
#define BOUNDARY_SINGLE_CACHELINE   1
#define BOUNDARY_MULTI_CACHELINE    2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
    int cacheline_size;
    u8 byte;
    int goal;

    pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
    if (byte == 0)
        cacheline_size = 1024;
    else
        cacheline_size = (int) byte * 4;

    /* On 5703 and later chips, the boundary bits have no
     * effect.
     */
    if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
        GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
        !tg3_flag(tp, PCI_EXPRESS))
        goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
    goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
    goal = BOUNDARY_SINGLE_CACHELINE;
#else
    goal = 0;
#endif
#endif

    if (tg3_flag(tp, 57765_PLUS)) {
        val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
        goto out;
    }

    if (!goal)
        goto out;

    /* PCI controllers on most RISC systems tend to disconnect
     * when a device tries to burst across a cache-line boundary.
     * Therefore, letting tg3 do so just wastes PCI bandwidth.
     *
     * Unfortunately, for PCI-E there are only limited
     * write-side controls for this, and thus for reads
     * we will still get the disconnects.  We'll also waste
     * these PCI cycles for both read and write for chips
     * other than 5700 and 5701 which do not implement the
     * boundary bits.
     */
    if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
        switch (cacheline_size) {
        case 16:
        case 32:
        case 64:
        case 128:
            if (goal == BOUNDARY_SINGLE_CACHELINE) {
                val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
                        DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
            } else {
                val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
                        DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
            }
            break;

        case 256:
            val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
                    DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
            break;

        default:
            val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
                    DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
            break;
        }
    } else if (tg3_flag(tp, PCI_EXPRESS)) {
        switch (cacheline_size) {
        case 16:
        case 32:
        case 64:
            if (goal == BOUNDARY_SINGLE_CACHELINE) {
                val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
                val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
                break;
            }
            /* fallthrough */
        case 128:
        default:
            val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
            val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
            break;
        }
    } else {
        switch (cacheline_size) {
        case 16:
            if (goal == BOUNDARY_SINGLE_CACHELINE) {
                val |= (DMA_RWCTRL_READ_BNDRY_16 |
                        DMA_RWCTRL_WRITE_BNDRY_16);
                break;
            }
            /* fallthrough */
        case 32:
            if (goal == BOUNDARY_SINGLE_CACHELINE) {
                val |= (DMA_RWCTRL_READ_BNDRY_32 |
                        DMA_RWCTRL_WRITE_BNDRY_32);
                break;
            }
            /* fallthrough */
        case 64:
            if (goal == BOUNDARY_SINGLE_CACHELINE) {
                val |= (DMA_RWCTRL_READ_BNDRY_64 |
                        DMA_RWCTRL_WRITE_BNDRY_64);
                break;
            }
            /* fallthrough */
        case 128:
            if (goal == BOUNDARY_SINGLE_CACHELINE) {
                val |= (DMA_RWCTRL_READ_BNDRY_128 |
                        DMA_RWCTRL_WRITE_BNDRY_128);
                break;
            }
            /* fallthrough */
        case 256:
            val |= (DMA_RWCTRL_READ_BNDRY_256 |
                    DMA_RWCTRL_WRITE_BNDRY_256);
            break;
        case 512:
            val |= (DMA_RWCTRL_READ_BNDRY_512 |
                    DMA_RWCTRL_WRITE_BNDRY_512);
            break;
        case 1024:
        default:
            val |= (DMA_RWCTRL_READ_BNDRY_1024 |
                    DMA_RWCTRL_WRITE_BNDRY_1024);
            break;
        }
    }

out:
    return val;
}
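/* The boundary "goal" is an arch-level policy: multi-cacheline for hosts
 * whose PCI controllers merge bursts well (ppc64/ia64/parisc),
 * single-cacheline where bursting across a line is expensive
 * (sparc64/alpha), and 0 (no restriction) elsewhere.  Only 5700/5701 and
 * PCIe parts honor the boundary bits at all, hence the early bail-out for
 * everything else.
 */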
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
                                     dma_addr_t buf_dma, int size,
                                     int to_device)
{
    struct tg3_internal_buffer_desc test_desc;
    u32 sram_dma_descs;
    int i, ret;

    sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

    tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
    tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
    tw32(RDMAC_STATUS, 0);
    tw32(WDMAC_STATUS, 0);

    tw32(BUFMGR_MODE, 0);
    tw32(FTQ_RESET, 0);

    test_desc.addr_hi = ((u64) buf_dma) >> 32;
    test_desc.addr_lo = buf_dma & 0xffffffff;
    test_desc.nic_mbuf = 0x00002100;
    test_desc.len = size;

    /*
     * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
     * the *second* time the tg3 driver was getting loaded after an
     * initial scan.
     *
     * Broadcom tells me:
     *   ...the DMA engine is connected to the GRC block and a DMA
     *   reset may affect the GRC block in some unpredictable way...
     *   The behavior of resets to individual blocks has not been tested.
     *
     * Broadcom noted the GRC reset will also reset all sub-components.
     */
    if (to_device) {
        test_desc.cqid_sqid = (13 << 8) | 2;

        tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
        udelay(40);
    } else {
        test_desc.cqid_sqid = (16 << 8) | 7;

        tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
        udelay(40);
    }
    test_desc.flags = 0x00000005;

    for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
        u32 val;

        val = *(((u32 *)&test_desc) + i);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                               sram_dma_descs + (i * sizeof(u32)));
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
    }
    pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

    if (to_device)
        tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
    else
        tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

    ret = -ENODEV;
    for (i = 0; i < 40; i++) {
        u32 val;

        if (to_device)
            val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
        else
            val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
        if ((val & 0xffff) == sram_dma_descs) {
            ret = 0;
            break;
        }

        udelay(100);
    }

    return ret;
}
#define TEST_BUFFER_SIZE    0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
    { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
    { },
};

static int __devinit tg3_test_dma(struct tg3 *tp)
{
    dma_addr_t buf_dma;
    u32 *buf, saved_dma_rwctrl;
    int ret = 0;

    buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
                             &buf_dma, GFP_KERNEL);
    if (!buf) {
        ret = -ENOMEM;
        goto out_nofree;
    }

    tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                      (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

    tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

    if (tg3_flag(tp, 57765_PLUS))
        goto out;

    if (tg3_flag(tp, PCI_EXPRESS)) {
        /* DMA read watermark not used on PCIE */
        tp->dma_rwctrl |= 0x00180000;
    } else if (!tg3_flag(tp, PCIX_MODE)) {
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
            tp->dma_rwctrl |= 0x003f0000;
        else
            tp->dma_rwctrl |= 0x003f000f;
    } else {
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
            u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
            u32 read_water = 0x7;

            /* If the 5704 is behind the EPB bridge, we can
             * do the less restrictive ONE_DMA workaround for
             * better performance.
             */
            if (tg3_flag(tp, 40BIT_DMA_BUG) &&
                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl |= 0x8000;
            else if (ccval == 0x6 || ccval == 0x7)
                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

            if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
                read_water = 4;
            /* Set bit 23 to enable PCIX hw bug fix */
            tp->dma_rwctrl |=
                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                (1 << 23);
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
            /* 5780 always in PCIX mode */
            tp->dma_rwctrl |= 0x00144000;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
            /* 5714 always in PCIX mode */
            tp->dma_rwctrl |= 0x00148000;
        } else {
            tp->dma_rwctrl |= 0x001b000f;
        }
    }

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
        tp->dma_rwctrl &= 0xfffffff0;

    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
        /* Remove this if it causes problems for some boards. */
        tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

        /* On 5700/5701 chips, we need to set this bit.
         * Otherwise the chip will issue cacheline transactions
         * to streamable DMA memory with not all the byte
         * enables turned on.  This is an error on several
         * RISC PCI controllers, in particular sparc64.
         *
         * On 5703/5704 chips, this bit has been reassigned
         * a different meaning.  In particular, it is used
         * on those chips to enable a PCI-X workaround.
         */
        tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
    }

    tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
    /* Unneeded, already done by tg3_get_invariants.  */
    tg3_switch_clocks(tp);
#endif

    if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
        GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
        goto out;

    /* It is best to perform DMA test with maximum write burst size
     * to expose the 5700/5701 write DMA bug.
     */
    saved_dma_rwctrl = tp->dma_rwctrl;
    tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
    tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

    while (1) {
        u32 *p = buf, i;

        for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
            p[i] = i;

        /* Send the buffer to the chip. */
        ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
        if (ret) {
            dev_err(&tp->pdev->dev,
                    "%s: Buffer write failed. err = %d\n",
                    __func__, ret);
            break;
        }

#if 0
        /* validate data reached card RAM correctly. */
        for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
            u32 val;
            tg3_read_mem(tp, 0x2100 + (i*4), &val);
            if (le32_to_cpu(val) != p[i]) {
                dev_err(&tp->pdev->dev,
                        "%s: Buffer corrupted on device! "
                        "(%d != %d)\n", __func__, val, i);
                /* ret = -ENODEV here? */
            }
            p[i] = 0;
        }
#endif
        /* Now read it back. */
        ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
        if (ret) {
            dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
                    "err = %d\n", __func__, ret);
            break;
        }

        /* Verify it. */
        for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
            if (p[i] == i)
                continue;

            if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                DMA_RWCTRL_WRITE_BNDRY_16) {
                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                break;
            } else {
                dev_err(&tp->pdev->dev,
                        "%s: Buffer corrupted on read back! "
                        "(%d != %d)\n", __func__, p[i], i);
                ret = -ENODEV;
                goto out;
            }
        }

        if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
            /* Success. */
            ret = 0;
            break;
        }
    }

    if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
        DMA_RWCTRL_WRITE_BNDRY_16) {
        /* DMA test passed without adjusting DMA boundary,
         * now look for chipsets that are known to expose the
         * DMA bug without failing the test.
         */
        if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
            tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
            tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
        } else {
            /* Safe to use the calculated DMA boundary. */
            tp->dma_rwctrl = saved_dma_rwctrl;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
    }

out:
    dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
    return ret;
}
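/* The test strategy above: program the most permissive write boundary
 * first so the 5700/5701 write-DMA bug shows up if present, and on a
 * read-back mismatch retry once with the conservative 16-byte write
 * boundary before declaring the device broken.  A clean pass still falls
 * back to the 16-byte boundary on chipsets known to hide the bug from
 * this test.
 */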
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
    if (tg3_flag(tp, 57765_PLUS)) {
        tp->bufmgr_config.mbuf_read_dma_low_water =
            DEFAULT_MB_RDMA_LOW_WATER_5705;
        tp->bufmgr_config.mbuf_mac_rx_low_water =
            DEFAULT_MB_MACRX_LOW_WATER_57765;
        tp->bufmgr_config.mbuf_high_water =
            DEFAULT_MB_HIGH_WATER_57765;

        tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
            DEFAULT_MB_RDMA_LOW_WATER_5705;
        tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
            DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
        tp->bufmgr_config.mbuf_high_water_jumbo =
            DEFAULT_MB_HIGH_WATER_JUMBO_57765;
    } else if (tg3_flag(tp, 5705_PLUS)) {
        tp->bufmgr_config.mbuf_read_dma_low_water =
            DEFAULT_MB_RDMA_LOW_WATER_5705;
        tp->bufmgr_config.mbuf_mac_rx_low_water =
            DEFAULT_MB_MACRX_LOW_WATER_5705;
        tp->bufmgr_config.mbuf_high_water =
            DEFAULT_MB_HIGH_WATER_5705;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
            tp->bufmgr_config.mbuf_mac_rx_low_water =
                DEFAULT_MB_MACRX_LOW_WATER_5906;
            tp->bufmgr_config.mbuf_high_water =
                DEFAULT_MB_HIGH_WATER_5906;
        }

        tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
            DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
        tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
            DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
        tp->bufmgr_config.mbuf_high_water_jumbo =
            DEFAULT_MB_HIGH_WATER_JUMBO_5780;
    } else {
        tp->bufmgr_config.mbuf_read_dma_low_water =
            DEFAULT_MB_RDMA_LOW_WATER;
        tp->bufmgr_config.mbuf_mac_rx_low_water =
            DEFAULT_MB_MACRX_LOW_WATER;
        tp->bufmgr_config.mbuf_high_water =
            DEFAULT_MB_HIGH_WATER;

        tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
            DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
        tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
            DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
        tp->bufmgr_config.mbuf_high_water_jumbo =
            DEFAULT_MB_HIGH_WATER_JUMBO;
    }

    tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
    tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
    switch (tp->phy_id & TG3_PHY_ID_MASK) {
    case TG3_PHY_ID_BCM5400:    return "5400";
    case TG3_PHY_ID_BCM5401:    return "5401";
    case TG3_PHY_ID_BCM5411:    return "5411";
    case TG3_PHY_ID_BCM5701:    return "5701";
    case TG3_PHY_ID_BCM5703:    return "5703";
    case TG3_PHY_ID_BCM5704:    return "5704";
    case TG3_PHY_ID_BCM5705:    return "5705";
    case TG3_PHY_ID_BCM5750:    return "5750";
    case TG3_PHY_ID_BCM5752:    return "5752";
    case TG3_PHY_ID_BCM5714:    return "5714";
    case TG3_PHY_ID_BCM5780:    return "5780";
    case TG3_PHY_ID_BCM5755:    return "5755";
    case TG3_PHY_ID_BCM5787:    return "5787";
    case TG3_PHY_ID_BCM5784:    return "5784";
    case TG3_PHY_ID_BCM5756:    return "5722/5756";
    case TG3_PHY_ID_BCM5906:    return "5906";
    case TG3_PHY_ID_BCM5761:    return "5761";
    case TG3_PHY_ID_BCM5718C:   return "5718C";
    case TG3_PHY_ID_BCM5718S:   return "5718S";
    case TG3_PHY_ID_BCM57765:   return "57765";
    case TG3_PHY_ID_BCM5719C:   return "5719C";
    case TG3_PHY_ID_BCM5720C:   return "5720C";
    case TG3_PHY_ID_BCM8002:    return "8002/serdes";
    case 0:                     return "serdes";
    default:                    return "unknown";
    }
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}
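/* Illustrative devfn arithmetic for the lookup above (values assumed,
 * not from the source): a dual-port 5704 at devfn 0x21 (device 4,
 * function 1) gives devnr = 0x21 & ~7 = 0x20; the loop then probes
 * devfns 0x20..0x27 and finds the peer at function 0. */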
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
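/* Illustrative reading of the defaults above (parameter semantics, not
 * values from this file): with rx_coalesce_usecs == 20 and
 * rx_max_coalesced_frames == 5, the NIC would raise an RX interrupt
 * after 20 usec or after 5 received frames, whichever happens first;
 * the _irq variants apply while an interrupt is already being
 * serviced, and the stats field paces statistics-block DMA. */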
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
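/* This table differs from tg3_netdev_ops above in two ways: it points
 * .ndo_start_xmit at tg3_start_xmit_dma_bug, which carries extra
 * workarounds for chips with DMA errata, and it omits .ndo_fix_features.
 * tg3_init_one() below selects one of the two tables per chip. */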
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 hw_features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;
	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	if (tg3_flag(tp, 5755_PLUS) && !tg3_flag(tp, 5717_PLUS))
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_iounmap;
		}
	}
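/* For reference (kernel definition, paraphrased): DMA_BIT_MASK(n)
 * evaluates to ((1ULL << n) - 1) for n < 64, so DMA_BIT_MASK(40) is
 * 0xffffffffff -- the EPB-bridged parts can address just under 1 TiB
 * of bus address space. */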
	tg3_init_bufmgr_config(tp);

	/* Selectively allow TSO based on operating conditions */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) ||
	    (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
		tg3_flag_set(tp, TSO_CAPABLE);
	else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;
	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (dev->features & NETIF_F_IP_CSUM))
		hw_features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (dev->features & NETIF_F_IPV6_CSUM)
			hw_features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			hw_features |= NETIF_F_TSO_ECN;
	}

	dev->hw_features |= hw_features;
	dev->features |= hw_features;
	dev->vlan_features |= hw_features;
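/* Because these TSO bits land in hw_features, they stay user-togglable
 * at runtime via ethtool, e.g. (illustrative commands, interface name
 * assumed):
 *
 *	ethtool -K eth0 tso off
 *	ethtool -K eth0 tso on
 */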
	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		dev->hw_features |= NETIF_F_LOOPBACK;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	}
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly; otherwise the DMA self test will enable WDMAC and
	 * we'll see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
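/* Illustrative effect of the reuse above (addresses assumed rather than
 * computed from this file): vector 0 keeps the link-only mailbox values
 * programmed before the comment, while each later vector advances
 * rcvmbx by 0x8 per RX return ring and steps sndmbx back and forth
 * between the low and high halves of the 64-bit send mailboxes. */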
	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}
	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
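/* The err_out_* labels above form the usual kernel goto ladder: each
 * failure point jumps to the label that releases exactly the resources
 * acquired before it, so the unwind order is the reverse of setup. */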
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		if (!tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
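/* SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops that reuses the
 * same handler pair for the suspend/freeze/poweroff transitions and for
 * the matching resume/thaw/restore transitions, so the two callbacks
 * above cover every system-sleep path. */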
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err) {
		netdev_err(netdev, "Failed to restore register access.\n");
		goto done;
	}

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
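/* Taken together, the three callbacks above implement the PCI error
 * recovery handshake: error_detected() quiesces the driver and requests
 * a reset, slot_reset() re-enables the device after the bus reset, and
 * resume() restarts traffic once the PCI core declares recovery
 * complete. */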
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);