Remove *_fill_nic() calls, and directly set nic->ioaddr and nic->irqno.
[gpxe.git] / src / drivers / net / tg3.c
blob 2188def42061a59685e912375d8e1fc6e2c8a89d
1 /* $Id$
2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002 Jeff Garzik (jgarzik@mandrakesoft.com)
6 * Copyright (C) 2003 Eric Biederman (ebiederman@lnxi.com) [etherboot port]
7 */
9 /* 11-13-2003 timlegge Fix Issue with NetGear GA302T
10 * 11-18-2003 ebiederm Generalize NetGear Fix to what the code was supposed to be.
11 * 01-06-2005 Alf (Frederic Olivie) Add Dell bcm 5751 (0x1677) support
12 * 04-15-2005 Martin Vogt Add Fujitsu Siemens Computer (FSC) 0x1734 bcm 5751 0x105d support
15 #include "etherboot.h"
16 #include "nic.h"
17 #include <errno.h>
18 #include <gpxe/pci.h>
19 #include <gpxe/ethernet.h>
20 #include "timer.h"
21 #include "string.h"
22 #include "tg3.h"
24 #define SUPPORT_COPPER_PHY 1
25 #define SUPPORT_FIBER_PHY 1
26 #define SUPPORT_LINK_REPORT 1
27 #define SUPPORT_PARTNO_STR 1
28 #define SUPPORT_PHY_STR 1
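/* A single static device instance: Etherboot only ever drives one
 * adapter at a time, so all device state lives in this one struct.
 */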
30 static struct tg3 tg3;
32 /* These numbers seem to be hard coded in the NIC firmware somehow.
33 * You can't change the ring sizes, but you can change where you place
34 * them in the NIC onboard memory.
36 #define TG3_RX_RING_SIZE 512
37 #define TG3_DEF_RX_RING_PENDING 20 /* RX_RING_PENDING seems to be o.k. at 20 and 200 */
38 #define TG3_RX_RCB_RING_SIZE 1024
40 /* (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ? \
41 512 : 1024) */
42 #define TG3_TX_RING_SIZE 512
43 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
45 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_RING_SIZE)
46 #define TG3_RX_RCB_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_RCB_RING_SIZE)
48 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * TG3_TX_RING_SIZE)
49 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
50 #define PREV_TX(N) (((N) - 1) & (TG3_TX_RING_SIZE - 1))
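/* Receive buffer size: a 1536-byte frame, plus 2 bytes (presumably so
 * the IP header lands on a 32-bit boundary; buffers are posted at
 * offset 2 in tg3_init_rings()), plus 64 bytes of slack that idx_len
 * subtracts back out.
 */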
52 #define RX_PKT_BUF_SZ (1536 + 2 + 64)
54 struct eth_frame {
55 uint8_t dst_addr[ETH_ALEN];
56 uint8_t src_addr[ETH_ALEN];
57 uint16_t type;
58 uint8_t data [ETH_FRAME_LEN - ETH_HLEN];
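/* All DMA-visible state - the three descriptor rings, the hardware
 * status and statistics blocks, and the receive buffers themselves -
 * lives in this one statically allocated structure, so the driver
 * needs no dynamic memory.
 */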
61 struct bss {
62 struct tg3_rx_buffer_desc rx_std[TG3_RX_RING_SIZE];
63 struct tg3_rx_buffer_desc rx_rcb[TG3_RX_RCB_RING_SIZE];
64 struct tg3_tx_buffer_desc tx_ring[TG3_TX_RING_SIZE];
65 struct tg3_hw_status hw_status;
66 struct tg3_hw_stats hw_stats;
67 unsigned char rx_bufs[TG3_DEF_RX_RING_PENDING][RX_PKT_BUF_SZ];
68 struct eth_frame tx_frame[2];
69 } tg3_bss __shared;
71 /**
72 * pci_save_state - save the PCI configuration space of a device before suspending
73 * @dev: - PCI device that we're dealing with
74 * @buffer: - buffer to hold config space context
76 * @buffer must be large enough to hold the entire PCI 2.2 config space
77 * (>= 64 bytes).
79 static int pci_save_state(struct pci_device *dev, uint32_t *buffer)
81 int i;
82 for (i = 0; i < 16; i++)
83 pci_read_config_dword(dev, i * 4,&buffer[i]);
84 return 0;
87 /**
88 * pci_restore_state - Restore the saved state of a PCI device
89 * @dev: - PCI device that we're dealing with
90 * @buffer: - saved PCI config space
93 static int pci_restore_state(struct pci_device *dev, uint32_t *buffer)
95 int i;
97 for (i = 0; i < 16; i++)
98 pci_write_config_dword(dev,i * 4, buffer[i]);
99 return 0;
102 static void tg3_write_indirect_reg32(uint32_t off, uint32_t val)
104 pci_write_config_dword(tg3.pdev, TG3PCI_REG_BASE_ADDR, off);
105 pci_write_config_dword(tg3.pdev, TG3PCI_REG_DATA, val);
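/* Register accessors: tw32() writes go through the indirect PCI
 * configuration-space window above (TG3PCI_REG_BASE_ADDR /
 * TG3PCI_REG_DATA); reads and the mailbox/16-bit/8-bit accessors use
 * the memory-mapped BAR at tg3.regs.
 */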
108 #define tw32(reg,val) tg3_write_indirect_reg32((reg),(val))
109 #define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tg3.regs + (reg))
110 #define tw16(reg,val) writew(((val) & 0xffff), tg3.regs + (reg))
111 #define tw8(reg,val) writeb(((val) & 0xff), tg3.regs + (reg))
112 #define tr32(reg) readl(tg3.regs + (reg))
113 #define tr16(reg) readw(tg3.regs + (reg))
114 #define tr8(reg) readb(tg3.regs + (reg))
116 static void tw32_carefully(uint32_t reg, uint32_t val)
118 tw32(reg, val);
119 tr32(reg);
120 udelay(100);
123 static void tw32_mailbox2(uint32_t reg, uint32_t val)
125 tw32_mailbox(reg, val);
126 tr32(reg);
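/* NIC SRAM accessors: the chip's on-board memory is reached through a
 * second indirect window in PCI configuration space
 * (TG3PCI_MEM_WIN_BASE_ADDR / TG3PCI_MEM_WIN_DATA).
 */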
129 static void tg3_write_mem(uint32_t off, uint32_t val)
131 pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
132 pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_DATA, val);
134 /* Always leave this as zero. */
135 pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
138 static void tg3_read_mem(uint32_t off, uint32_t *val)
140 pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
141 pci_read_config_dword(tg3.pdev, TG3PCI_MEM_WIN_DATA, val);
143 /* Always leave this as zero. */
144 pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
147 static void tg3_disable_ints(struct tg3 *tp)
149 tw32(TG3PCI_MISC_HOST_CTRL,
150 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
151 tw32_mailbox2(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
154 static void tg3_switch_clocks(struct tg3 *tp)
156 uint32_t orig_clock_ctrl, clock_ctrl;
158 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
160 orig_clock_ctrl = clock_ctrl;
161 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | CLOCK_CTRL_CLKRUN_OENABLE | 0x1f);
162 tp->pci_clock_ctrl = clock_ctrl;
164 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) &&
165 (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
166 && (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) &&
167 (orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE)!=0) {
168 tw32_carefully(TG3PCI_CLOCK_CTRL,
169 clock_ctrl | (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
170 tw32_carefully(TG3PCI_CLOCK_CTRL,
171 clock_ctrl | (CLOCK_CTRL_ALTCLK));
173 tw32_carefully(TG3PCI_CLOCK_CTRL, clock_ctrl);
176 #define PHY_BUSY_LOOPS 5000
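/* MII access: each PHY read/write is kicked off through MAC_MI_COM and
 * then polled for MI_COM_BUSY to clear, up to PHY_BUSY_LOOPS times with
 * a 10us delay per iteration (roughly 50ms worst case).
 */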
178 static int tg3_readphy(struct tg3 *tp, int reg, uint32_t *val)
180 uint32_t frame_val;
181 int loops, ret;
183 tw32_carefully(MAC_MI_MODE, tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL);
185 *val = 0xffffffff;
187 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
188 MI_COM_PHY_ADDR_MASK);
189 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
190 MI_COM_REG_ADDR_MASK);
191 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
193 tw32_carefully(MAC_MI_COM, frame_val);
195 loops = PHY_BUSY_LOOPS;
196 while (loops-- > 0) {
197 udelay(10);
198 frame_val = tr32(MAC_MI_COM);
200 if ((frame_val & MI_COM_BUSY) == 0) {
201 udelay(5);
202 frame_val = tr32(MAC_MI_COM);
203 break;
207 ret = -EBUSY;
208 if (loops > 0) {
209 *val = frame_val & MI_COM_DATA_MASK;
210 ret = 0;
213 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
215 return ret;
218 static int tg3_writephy(struct tg3 *tp, int reg, uint32_t val)
220 uint32_t frame_val;
221 int loops, ret;
223 tw32_carefully(MAC_MI_MODE, tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL);
225 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
226 MI_COM_PHY_ADDR_MASK);
227 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
228 MI_COM_REG_ADDR_MASK);
229 frame_val |= (val & MI_COM_DATA_MASK);
230 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
232 tw32_carefully(MAC_MI_COM, frame_val);
234 loops = PHY_BUSY_LOOPS;
235 while (loops-- > 0) {
236 udelay(10);
237 frame_val = tr32(MAC_MI_COM);
238 if ((frame_val & MI_COM_BUSY) == 0) {
239 udelay(5);
240 frame_val = tr32(MAC_MI_COM);
241 break;
245 ret = -EBUSY;
246 if (loops > 0)
247 ret = 0;
249 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
251 return ret;
254 static int tg3_writedsp(struct tg3 *tp, uint16_t addr, uint16_t val)
256 int err;
257 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, addr);
258 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
259 return err;
263 static void tg3_phy_set_wirespeed(struct tg3 *tp)
265 uint32_t val;
267 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
268 return;
270 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
271 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
272 tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
275 static int tg3_bmcr_reset(struct tg3 *tp)
277 uint32_t phy_control;
278 int limit, err;
280 /* OK, reset it, and poll the BMCR_RESET bit until it
281 * clears or we time out.
283 phy_control = BMCR_RESET;
284 err = tg3_writephy(tp, MII_BMCR, phy_control);
285 if (err != 0)
286 return -EBUSY;
288 limit = 5000;
289 while (limit--) {
290 err = tg3_readphy(tp, MII_BMCR, &phy_control);
291 if (err != 0)
292 return -EBUSY;
294 if ((phy_control & BMCR_RESET) == 0) {
295 udelay(40);
296 break;
298 udelay(10);
300 if (limit <= 0)
301 return -EBUSY;
303 return 0;
306 static int tg3_wait_macro_done(struct tg3 *tp)
308 int limit = 100;
310 while (limit--) {
311 uint32_t tmp32;
313 tg3_readphy(tp, 0x16, &tmp32);
314 if ((tmp32 & 0x1000) == 0)
315 break;
317 if (limit <= 0)
318 return -EBUSY;
320 return 0;
323 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
325 static const uint32_t test_pat[4][6] = {
326 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
327 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
328 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
329 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
331 int chan;
333 for (chan = 0; chan < 4; chan++) {
334 int i;
336 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
337 (chan * 0x2000) | 0x0200);
338 tg3_writephy(tp, 0x16, 0x0002);
340 for (i = 0; i < 6; i++)
341 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
342 test_pat[chan][i]);
344 tg3_writephy(tp, 0x16, 0x0202);
345 if (tg3_wait_macro_done(tp)) {
346 *resetp = 1;
347 return -EBUSY;
350 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
351 (chan * 0x2000) | 0x0200);
352 tg3_writephy(tp, 0x16, 0x0082);
353 if (tg3_wait_macro_done(tp)) {
354 *resetp = 1;
355 return -EBUSY;
358 tg3_writephy(tp, 0x16, 0x0802);
359 if (tg3_wait_macro_done(tp)) {
360 *resetp = 1;
361 return -EBUSY;
364 for (i = 0; i < 6; i += 2) {
365 uint32_t low, high;
367 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
368 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
369 if (tg3_wait_macro_done(tp)) {
370 *resetp = 1;
371 return -EBUSY;
373 low &= 0x7fff;
374 high &= 0x000f;
375 if (low != test_pat[chan][i] ||
376 high != test_pat[chan][i+1]) {
377 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
378 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
379 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
381 return -EBUSY;
386 return 0;
389 static int tg3_phy_reset_chanpat(struct tg3 *tp)
391 int chan;
393 for (chan = 0; chan < 4; chan++) {
394 int i;
396 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
397 (chan * 0x2000) | 0x0200);
398 tg3_writephy(tp, 0x16, 0x0002);
399 for (i = 0; i < 6; i++)
400 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
401 tg3_writephy(tp, 0x16, 0x0202);
402 if (tg3_wait_macro_done(tp))
403 return -EBUSY;
406 return 0;
409 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
411 uint32_t reg32, phy9_orig;
412 int retries, do_phy_reset, err;
414 retries = 10;
415 do_phy_reset = 1;
416 do {
417 if (do_phy_reset) {
418 err = tg3_bmcr_reset(tp);
419 if (err)
420 return err;
421 do_phy_reset = 0;
424 /* Disable transmitter and interrupt. */
425 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
426 reg32 |= 0x3000;
427 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
429 /* Set full-duplex, 1000 mbps. */
430 tg3_writephy(tp, MII_BMCR,
431 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
433 /* Set to master mode. */
434 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
435 tg3_writephy(tp, MII_TG3_CTRL,
436 (MII_TG3_CTRL_AS_MASTER |
437 MII_TG3_CTRL_ENABLE_AS_MASTER));
439 /* Enable SM_DSP_CLOCK and 6dB. */
440 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
442 /* Block the PHY control access. */
443 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
444 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
446 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
447 if (!err)
448 break;
449 } while (--retries);
451 err = tg3_phy_reset_chanpat(tp);
452 if (err)
453 return err;
455 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
456 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
458 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
459 tg3_writephy(tp, 0x16, 0x0000);
461 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
463 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
465 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
466 reg32 &= ~0x3000;
467 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
469 return err;
472 /* This will reset the tigon3 PHY if there is no valid
473 * link.
475 static int tg3_phy_reset(struct tg3 *tp)
477 uint32_t phy_status;
478 int err;
480 err = tg3_readphy(tp, MII_BMSR, &phy_status);
481 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
482 if (err != 0)
483 return -EBUSY;
485 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
486 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
487 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
488 err = tg3_phy_reset_5703_4_5(tp);
489 if (err)
490 return err;
491 goto out;
493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
494 // Taken from Broadcom's source code
495 tg3_writephy(tp, 0x18, 0x0c00);
496 tg3_writephy(tp, 0x17, 0x000a);
497 tg3_writephy(tp, 0x15, 0x310b);
498 tg3_writephy(tp, 0x17, 0x201f);
499 tg3_writephy(tp, 0x15, 0x9506);
500 tg3_writephy(tp, 0x17, 0x401f);
501 tg3_writephy(tp, 0x15, 0x14e2);
502 tg3_writephy(tp, 0x18, 0x0400);
504 err = tg3_bmcr_reset(tp);
505 if (err)
506 return err;
507 out:
508 tg3_phy_set_wirespeed(tp);
509 return 0;
512 static void tg3_set_power_state_0(struct tg3 *tp)
514 uint16_t power_control;
515 int pm = tp->pm_cap;
517 /* Make sure register accesses (indirect or otherwise)
518 * will function correctly.
520 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
522 pci_read_config_word(tp->pdev, pm + PCI_PM_CTRL, &power_control);
524 power_control |= PCI_PM_CTRL_PME_STATUS;
525 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
526 power_control |= 0;
527 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
529 tw32_carefully(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
531 return;
535 #if SUPPORT_LINK_REPORT
536 static void tg3_link_report(struct tg3 *tp)
538 if (!tp->carrier_ok) {
539 printf("Link is down.\n");
540 } else {
541 printf("Link is up at %d Mbps, %s duplex. %s %s %s\n",
542 (tp->link_config.active_speed == SPEED_1000 ?
543 1000 :
544 (tp->link_config.active_speed == SPEED_100 ?
545 100 : 10)),
546 (tp->link_config.active_duplex == DUPLEX_FULL ?
547 "full" : "half"),
548 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "TX" : "",
549 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "RX" : "",
550 (tp->tg3_flags & (TG3_FLAG_TX_PAUSE |TG3_FLAG_RX_PAUSE)) ? "flow control" : "");
553 #else
554 #define tg3_link_report(tp)
555 #endif
557 static void tg3_setup_flow_control(struct tg3 *tp, uint32_t local_adv, uint32_t remote_adv)
559 uint32_t new_tg3_flags = 0;
561 if (local_adv & ADVERTISE_PAUSE_CAP) {
562 if (local_adv & ADVERTISE_PAUSE_ASYM) {
563 if (remote_adv & LPA_PAUSE_CAP)
564 new_tg3_flags |=
565 (TG3_FLAG_RX_PAUSE |
566 TG3_FLAG_TX_PAUSE);
567 else if (remote_adv & LPA_PAUSE_ASYM)
568 new_tg3_flags |=
569 (TG3_FLAG_RX_PAUSE);
570 } else {
571 if (remote_adv & LPA_PAUSE_CAP)
572 new_tg3_flags |=
573 (TG3_FLAG_RX_PAUSE |
574 TG3_FLAG_TX_PAUSE);
576 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
577 if ((remote_adv & LPA_PAUSE_CAP) &&
578 (remote_adv & LPA_PAUSE_ASYM))
579 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
582 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
583 tp->tg3_flags |= new_tg3_flags;
585 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
586 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
587 else
588 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
590 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
591 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
592 else
593 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
596 #if SUPPORT_COPPER_PHY
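/* Decode the speed/duplex field of MII_TG3_AUX_STAT via a small lookup
 * table; each table entry packs (speed << 2) | duplex into one byte.
 */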
597 static void tg3_aux_stat_to_speed_duplex(
598 struct tg3 *tp __unused, uint32_t val, uint8_t *speed, uint8_t *duplex)
600 static const uint8_t map[] = {
601 [0] = (SPEED_INVALID << 2) | DUPLEX_INVALID,
602 [MII_TG3_AUX_STAT_10HALF >> 8] = (SPEED_10 << 2) | DUPLEX_HALF,
603 [MII_TG3_AUX_STAT_10FULL >> 8] = (SPEED_10 << 2) | DUPLEX_FULL,
604 [MII_TG3_AUX_STAT_100HALF >> 8] = (SPEED_100 << 2) | DUPLEX_HALF,
605 [MII_TG3_AUX_STAT_100_4 >> 8] = (SPEED_INVALID << 2) | DUPLEX_INVALID,
606 [MII_TG3_AUX_STAT_100FULL >> 8] = (SPEED_100 << 2) | DUPLEX_FULL,
607 [MII_TG3_AUX_STAT_1000HALF >> 8] = (SPEED_1000 << 2) | DUPLEX_HALF,
608 [MII_TG3_AUX_STAT_1000FULL >> 8] = (SPEED_1000 << 2) | DUPLEX_FULL,
610 uint8_t result;
611 result = map[(val & MII_TG3_AUX_STAT_SPDMASK) >> 8];
612 *speed = result >> 2;
613 *duplex = result & 3;
616 static int tg3_phy_copper_begin(struct tg3 *tp)
618 uint32_t new_adv;
620 tp->link_config.advertising =
621 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
622 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
623 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
624 ADVERTISED_Autoneg | ADVERTISED_MII);
626 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) {
627 tp->link_config.advertising &=
628 ~(ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
631 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
632 if (tp->link_config.advertising & ADVERTISED_10baseT_Half) {
633 new_adv |= ADVERTISE_10HALF;
635 if (tp->link_config.advertising & ADVERTISED_10baseT_Full) {
636 new_adv |= ADVERTISE_10FULL;
638 if (tp->link_config.advertising & ADVERTISED_100baseT_Half) {
639 new_adv |= ADVERTISE_100HALF;
641 if (tp->link_config.advertising & ADVERTISED_100baseT_Full) {
642 new_adv |= ADVERTISE_100FULL;
644 tg3_writephy(tp, MII_ADVERTISE, new_adv);
646 if (tp->link_config.advertising &
647 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
648 new_adv = 0;
649 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) {
650 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
652 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) {
653 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
655 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
656 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
657 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
658 new_adv |= (MII_TG3_CTRL_AS_MASTER |
659 MII_TG3_CTRL_ENABLE_AS_MASTER);
661 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
662 } else {
663 tg3_writephy(tp, MII_TG3_CTRL, 0);
666 tg3_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
668 return 0;
671 static int tg3_init_5401phy_dsp(struct tg3 *tp)
673 int err;
675 /* Turn off tap power management. */
676 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c20);
678 err |= tg3_writedsp(tp, 0x0012, 0x1804);
679 err |= tg3_writedsp(tp, 0x0013, 0x1204);
680 err |= tg3_writedsp(tp, 0x8006, 0x0132);
681 err |= tg3_writedsp(tp, 0x8006, 0x0232);
682 err |= tg3_writedsp(tp, 0x201f, 0x0a20);
684 udelay(40);
686 return err;
689 static int tg3_setup_copper_phy(struct tg3 *tp)
691 int current_link_up;
692 uint32_t bmsr, dummy;
693 int i, err;
695 tw32_carefully(MAC_STATUS,
696 (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED
697 | MAC_STATUS_LNKSTATE_CHANGED));
699 tp->mi_mode = MAC_MI_MODE_BASE;
700 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
702 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
704 /* Some third-party PHYs need to be reset on link going
705 * down.
707 if ( ( (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
708 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
709 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)) &&
710 (tp->carrier_ok)) {
711 tg3_readphy(tp, MII_BMSR, &bmsr);
712 tg3_readphy(tp, MII_BMSR, &bmsr);
713 if (!(bmsr & BMSR_LSTATUS))
714 tg3_phy_reset(tp);
717 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
718 tg3_readphy(tp, MII_BMSR, &bmsr);
719 tg3_readphy(tp, MII_BMSR, &bmsr);
721 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
722 bmsr = 0;
724 if (!(bmsr & BMSR_LSTATUS)) {
725 err = tg3_init_5401phy_dsp(tp);
726 if (err)
727 return err;
729 tg3_readphy(tp, MII_BMSR, &bmsr);
730 for (i = 0; i < 1000; i++) {
731 udelay(10);
732 tg3_readphy(tp, MII_BMSR, &bmsr);
733 if (bmsr & BMSR_LSTATUS) {
734 udelay(40);
735 break;
739 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
740 !(bmsr & BMSR_LSTATUS) &&
741 tp->link_config.active_speed == SPEED_1000) {
742 err = tg3_phy_reset(tp);
743 if (!err)
744 err = tg3_init_5401phy_dsp(tp);
745 if (err)
746 return err;
749 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
750 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
751 /* 5701 {A0,B0} CRC bug workaround */
752 tg3_writephy(tp, 0x15, 0x0a75);
753 tg3_writephy(tp, 0x1c, 0x8c68);
754 tg3_writephy(tp, 0x1c, 0x8d68);
755 tg3_writephy(tp, 0x1c, 0x8c68);
758 /* Clear pending interrupts... */
759 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
760 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
762 tg3_writephy(tp, MII_TG3_IMASK, ~0);
764 if (tp->led_mode == led_mode_three_link)
765 tg3_writephy(tp, MII_TG3_EXT_CTRL,
766 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
767 else
768 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
770 current_link_up = 0;
772 tg3_readphy(tp, MII_BMSR, &bmsr);
773 tg3_readphy(tp, MII_BMSR, &bmsr);
775 if (bmsr & BMSR_LSTATUS) {
776 uint32_t aux_stat, bmcr;
778 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
779 for (i = 0; i < 2000; i++) {
780 udelay(10);
781 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
782 if (aux_stat)
783 break;
786 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
787 &tp->link_config.active_speed,
788 &tp->link_config.active_duplex);
789 tg3_readphy(tp, MII_BMCR, &bmcr);
790 tg3_readphy(tp, MII_BMCR, &bmcr);
791 if (bmcr & BMCR_ANENABLE) {
792 uint32_t gig_ctrl;
794 current_link_up = 1;
796 /* Force autoneg restart if we are exiting
797 * low power mode.
799 tg3_readphy(tp, MII_TG3_CTRL, &gig_ctrl);
800 if (!(gig_ctrl & (MII_TG3_CTRL_ADV_1000_HALF |
801 MII_TG3_CTRL_ADV_1000_FULL))) {
802 current_link_up = 0;
804 } else {
805 current_link_up = 0;
809 if (current_link_up == 1 &&
810 (tp->link_config.active_duplex == DUPLEX_FULL)) {
811 uint32_t local_adv, remote_adv;
813 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
814 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
816 tg3_readphy(tp, MII_LPA, &remote_adv);
817 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
819 /* If we are not advertising full pause capability,
820 * something is wrong. Bring the link down and reconfigure.
822 if (local_adv != ADVERTISE_PAUSE_CAP) {
823 current_link_up = 0;
824 } else {
825 tg3_setup_flow_control(tp, local_adv, remote_adv);
829 if (current_link_up == 0) {
830 uint32_t tmp;
832 tg3_phy_copper_begin(tp);
834 tg3_readphy(tp, MII_BMSR, &tmp);
835 tg3_readphy(tp, MII_BMSR, &tmp);
836 if (tmp & BMSR_LSTATUS)
837 current_link_up = 1;
840 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
841 if (current_link_up == 1) {
842 if (tp->link_config.active_speed == SPEED_100 ||
843 tp->link_config.active_speed == SPEED_10)
844 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
845 else
846 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
847 } else
848 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
850 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
851 if (tp->link_config.active_duplex == DUPLEX_HALF)
852 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
854 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
855 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
856 if ((tp->led_mode == led_mode_link10) ||
857 (current_link_up == 1 &&
858 tp->link_config.active_speed == SPEED_10))
859 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
860 } else {
861 if (current_link_up == 1)
862 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
863 tw32(MAC_LED_CTRL, LED_CTRL_PHY_MODE_1);
866 /* ??? Without this setting Netgear GA302T PHY does not
867 * ??? send/receive packets...
868 * With this, other PHYs cannot bring up the link
870 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
871 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
872 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
873 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
876 tw32_carefully(MAC_MODE, tp->mac_mode);
878 /* Link change polled. */
879 tw32_carefully(MAC_EVENT, 0);
881 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
882 current_link_up == 1 &&
883 tp->link_config.active_speed == SPEED_1000 &&
884 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
885 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
886 udelay(120);
887 tw32_carefully(MAC_STATUS,
888 (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED));
889 tg3_write_mem(
890 NIC_SRAM_FIRMWARE_MBOX,
891 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
894 if (current_link_up != tp->carrier_ok) {
895 tp->carrier_ok = current_link_up;
896 tg3_link_report(tp);
899 return 0;
901 #else
902 #define tg3_setup_copper_phy(TP) (-EINVAL)
903 #endif /* SUPPORT_COPPER_PHY */
905 #if SUPPORT_FIBER_PHY
906 struct tg3_fiber_aneginfo {
907 int state;
908 #define ANEG_STATE_UNKNOWN 0
909 #define ANEG_STATE_AN_ENABLE 1
910 #define ANEG_STATE_RESTART_INIT 2
911 #define ANEG_STATE_RESTART 3
912 #define ANEG_STATE_DISABLE_LINK_OK 4
913 #define ANEG_STATE_ABILITY_DETECT_INIT 5
914 #define ANEG_STATE_ABILITY_DETECT 6
915 #define ANEG_STATE_ACK_DETECT_INIT 7
916 #define ANEG_STATE_ACK_DETECT 8
917 #define ANEG_STATE_COMPLETE_ACK_INIT 9
918 #define ANEG_STATE_COMPLETE_ACK 10
919 #define ANEG_STATE_IDLE_DETECT_INIT 11
920 #define ANEG_STATE_IDLE_DETECT 12
921 #define ANEG_STATE_LINK_OK 13
922 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
923 #define ANEG_STATE_NEXT_PAGE_WAIT 15
925 uint32_t flags;
926 #define MR_AN_ENABLE 0x00000001
927 #define MR_RESTART_AN 0x00000002
928 #define MR_AN_COMPLETE 0x00000004
929 #define MR_PAGE_RX 0x00000008
930 #define MR_NP_LOADED 0x00000010
931 #define MR_TOGGLE_TX 0x00000020
932 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
933 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
934 #define MR_LP_ADV_SYM_PAUSE 0x00000100
935 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
936 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
937 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
938 #define MR_LP_ADV_NEXT_PAGE 0x00001000
939 #define MR_TOGGLE_RX 0x00002000
940 #define MR_NP_RX 0x00004000
942 #define MR_LINK_OK 0x80000000
944 unsigned long link_time, cur_time;
946 uint32_t ability_match_cfg;
947 int ability_match_count;
949 char ability_match, idle_match, ack_match;
951 uint32_t txconfig, rxconfig;
952 #define ANEG_CFG_NP 0x00000080
953 #define ANEG_CFG_ACK 0x00000040
954 #define ANEG_CFG_RF2 0x00000020
955 #define ANEG_CFG_RF1 0x00000010
956 #define ANEG_CFG_PS2 0x00000001
957 #define ANEG_CFG_PS1 0x00008000
958 #define ANEG_CFG_HD 0x00004000
959 #define ANEG_CFG_FD 0x00002000
960 #define ANEG_CFG_INVAL 0x00001f06
963 #define ANEG_OK 0
964 #define ANEG_DONE 1
965 #define ANEG_TIMER_ENAB 2
966 #define ANEG_FAILED -1
968 #define ANEG_STATE_SETTLE_TIME 10000
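/* Software 1000BASE-X autonegotiation state machine. The caller
 * (tg3_setup_fiber_phy) steps it roughly once per microsecond, so
 * ANEG_STATE_SETTLE_TIME corresponds to about 10ms of settle time.
 */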
970 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
971 struct tg3_fiber_aneginfo *ap)
973 unsigned long delta;
974 uint32_t rx_cfg_reg;
975 int ret;
977 if (ap->state == ANEG_STATE_UNKNOWN) {
978 ap->rxconfig = 0;
979 ap->link_time = 0;
980 ap->cur_time = 0;
981 ap->ability_match_cfg = 0;
982 ap->ability_match_count = 0;
983 ap->ability_match = 0;
984 ap->idle_match = 0;
985 ap->ack_match = 0;
987 ap->cur_time++;
989 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
990 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
992 if (rx_cfg_reg != ap->ability_match_cfg) {
993 ap->ability_match_cfg = rx_cfg_reg;
994 ap->ability_match = 0;
995 ap->ability_match_count = 0;
996 } else {
997 if (++ap->ability_match_count > 1) {
998 ap->ability_match = 1;
999 ap->ability_match_cfg = rx_cfg_reg;
1002 if (rx_cfg_reg & ANEG_CFG_ACK)
1003 ap->ack_match = 1;
1004 else
1005 ap->ack_match = 0;
1007 ap->idle_match = 0;
1008 } else {
1009 ap->idle_match = 1;
1010 ap->ability_match_cfg = 0;
1011 ap->ability_match_count = 0;
1012 ap->ability_match = 0;
1013 ap->ack_match = 0;
1015 rx_cfg_reg = 0;
1018 ap->rxconfig = rx_cfg_reg;
1019 ret = ANEG_OK;
1021 switch(ap->state) {
1022 case ANEG_STATE_UNKNOWN:
1023 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1024 ap->state = ANEG_STATE_AN_ENABLE;
1026 /* fallthru */
1027 case ANEG_STATE_AN_ENABLE:
1028 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1029 if (ap->flags & MR_AN_ENABLE) {
1030 ap->link_time = 0;
1031 ap->cur_time = 0;
1032 ap->ability_match_cfg = 0;
1033 ap->ability_match_count = 0;
1034 ap->ability_match = 0;
1035 ap->idle_match = 0;
1036 ap->ack_match = 0;
1038 ap->state = ANEG_STATE_RESTART_INIT;
1039 } else {
1040 ap->state = ANEG_STATE_DISABLE_LINK_OK;
1042 break;
1044 case ANEG_STATE_RESTART_INIT:
1045 ap->link_time = ap->cur_time;
1046 ap->flags &= ~(MR_NP_LOADED);
1047 ap->txconfig = 0;
1048 tw32(MAC_TX_AUTO_NEG, 0);
1049 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1050 tw32_carefully(MAC_MODE, tp->mac_mode);
1052 ret = ANEG_TIMER_ENAB;
1053 ap->state = ANEG_STATE_RESTART;
1055 /* fallthru */
1056 case ANEG_STATE_RESTART:
1057 delta = ap->cur_time - ap->link_time;
1058 if (delta > ANEG_STATE_SETTLE_TIME) {
1059 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1060 } else {
1061 ret = ANEG_TIMER_ENAB;
1063 break;
1065 case ANEG_STATE_DISABLE_LINK_OK:
1066 ret = ANEG_DONE;
1067 break;
1069 case ANEG_STATE_ABILITY_DETECT_INIT:
1070 ap->flags &= ~(MR_TOGGLE_TX);
1071 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1072 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1073 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1074 tw32_carefully(MAC_MODE, tp->mac_mode);
1076 ap->state = ANEG_STATE_ABILITY_DETECT;
1077 break;
1079 case ANEG_STATE_ABILITY_DETECT:
1080 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1081 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1083 break;
1085 case ANEG_STATE_ACK_DETECT_INIT:
1086 ap->txconfig |= ANEG_CFG_ACK;
1087 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1088 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1089 tw32_carefully(MAC_MODE, tp->mac_mode);
1091 ap->state = ANEG_STATE_ACK_DETECT;
1093 /* fallthru */
1094 case ANEG_STATE_ACK_DETECT:
1095 if (ap->ack_match != 0) {
1096 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1097 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1098 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1099 } else {
1100 ap->state = ANEG_STATE_AN_ENABLE;
1102 } else if (ap->ability_match != 0 &&
1103 ap->rxconfig == 0) {
1104 ap->state = ANEG_STATE_AN_ENABLE;
1106 break;
1108 case ANEG_STATE_COMPLETE_ACK_INIT:
1109 if (ap->rxconfig & ANEG_CFG_INVAL) {
1110 ret = ANEG_FAILED;
1111 break;
1113 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1114 MR_LP_ADV_HALF_DUPLEX |
1115 MR_LP_ADV_SYM_PAUSE |
1116 MR_LP_ADV_ASYM_PAUSE |
1117 MR_LP_ADV_REMOTE_FAULT1 |
1118 MR_LP_ADV_REMOTE_FAULT2 |
1119 MR_LP_ADV_NEXT_PAGE |
1120 MR_TOGGLE_RX |
1121 MR_NP_RX);
1122 if (ap->rxconfig & ANEG_CFG_FD)
1123 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1124 if (ap->rxconfig & ANEG_CFG_HD)
1125 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1126 if (ap->rxconfig & ANEG_CFG_PS1)
1127 ap->flags |= MR_LP_ADV_SYM_PAUSE;
1128 if (ap->rxconfig & ANEG_CFG_PS2)
1129 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1130 if (ap->rxconfig & ANEG_CFG_RF1)
1131 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1132 if (ap->rxconfig & ANEG_CFG_RF2)
1133 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1134 if (ap->rxconfig & ANEG_CFG_NP)
1135 ap->flags |= MR_LP_ADV_NEXT_PAGE;
1137 ap->link_time = ap->cur_time;
1139 ap->flags ^= (MR_TOGGLE_TX);
1140 if (ap->rxconfig & 0x0008)
1141 ap->flags |= MR_TOGGLE_RX;
1142 if (ap->rxconfig & ANEG_CFG_NP)
1143 ap->flags |= MR_NP_RX;
1144 ap->flags |= MR_PAGE_RX;
1146 ap->state = ANEG_STATE_COMPLETE_ACK;
1147 ret = ANEG_TIMER_ENAB;
1148 break;
1150 case ANEG_STATE_COMPLETE_ACK:
1151 if (ap->ability_match != 0 &&
1152 ap->rxconfig == 0) {
1153 ap->state = ANEG_STATE_AN_ENABLE;
1154 break;
1156 delta = ap->cur_time - ap->link_time;
1157 if (delta > ANEG_STATE_SETTLE_TIME) {
1158 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1159 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1160 } else {
1161 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1162 !(ap->flags & MR_NP_RX)) {
1163 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1164 } else {
1165 ret = ANEG_FAILED;
1169 break;
1171 case ANEG_STATE_IDLE_DETECT_INIT:
1172 ap->link_time = ap->cur_time;
1173 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1174 tw32_carefully(MAC_MODE, tp->mac_mode);
1176 ap->state = ANEG_STATE_IDLE_DETECT;
1177 ret = ANEG_TIMER_ENAB;
1178 break;
1180 case ANEG_STATE_IDLE_DETECT:
1181 if (ap->ability_match != 0 &&
1182 ap->rxconfig == 0) {
1183 ap->state = ANEG_STATE_AN_ENABLE;
1184 break;
1186 delta = ap->cur_time - ap->link_time;
1187 if (delta > ANEG_STATE_SETTLE_TIME) {
1188 /* XXX another gem from the Broadcom driver :( */
1189 ap->state = ANEG_STATE_LINK_OK;
1191 break;
1193 case ANEG_STATE_LINK_OK:
1194 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1195 ret = ANEG_DONE;
1196 break;
1198 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1199 /* ??? unimplemented */
1200 break;
1202 case ANEG_STATE_NEXT_PAGE_WAIT:
1203 /* ??? unimplemented */
1204 break;
1206 default:
1207 ret = ANEG_FAILED;
1208 break;
1211 return ret;
1214 static int tg3_setup_fiber_phy(struct tg3 *tp)
1216 uint32_t orig_pause_cfg;
1217 uint16_t orig_active_speed;
1218 uint8_t orig_active_duplex;
1219 int current_link_up;
1220 int i;
1222 orig_pause_cfg =
1223 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
1224 TG3_FLAG_TX_PAUSE));
1225 orig_active_speed = tp->link_config.active_speed;
1226 orig_active_duplex = tp->link_config.active_duplex;
1228 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
1229 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
1230 tw32_carefully(MAC_MODE, tp->mac_mode);
1232 /* Reset when initialising for the first time or when we have a link. */
1233 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
1234 (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
1235 /* Set PLL lock range. */
1236 tg3_writephy(tp, 0x16, 0x8007);
1238 /* SW reset */
1239 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
1241 /* Wait for reset to complete. */
1242 mdelay(5);
1244 /* Config mode; select PMA/Ch 1 regs. */
1245 tg3_writephy(tp, 0x10, 0x8411);
1247 /* Enable auto-lock and comdet, select txclk for tx. */
1248 tg3_writephy(tp, 0x11, 0x0a10);
1250 tg3_writephy(tp, 0x18, 0x00a0);
1251 tg3_writephy(tp, 0x16, 0x41ff);
1253 /* Assert and deassert POR. */
1254 tg3_writephy(tp, 0x13, 0x0400);
1255 udelay(40);
1256 tg3_writephy(tp, 0x13, 0x0000);
1258 tg3_writephy(tp, 0x11, 0x0a50);
1259 udelay(40);
1260 tg3_writephy(tp, 0x11, 0x0a10);
1262 /* Wait for signal to stabilize */
1263 mdelay(150);
1265 /* Deselect the channel register so we can read the PHYID
1266 * later.
1268 tg3_writephy(tp, 0x10, 0x8011);
1271 /* Disable link change interrupt. */
1272 tw32_carefully(MAC_EVENT, 0);
1274 current_link_up = 0;
1275 if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
1276 if (!(tp->tg3_flags & TG3_FLAG_GOT_SERDES_FLOWCTL)) {
1277 struct tg3_fiber_aneginfo aninfo;
1278 int status = ANEG_FAILED;
1279 unsigned int tick;
1280 uint32_t tmp;
1282 memset(&aninfo, 0, sizeof(aninfo));
1283 aninfo.flags |= (MR_AN_ENABLE);
1285 tw32(MAC_TX_AUTO_NEG, 0);
1287 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
1288 tw32_carefully(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
1290 tw32_carefully(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
1292 aninfo.state = ANEG_STATE_UNKNOWN;
1293 aninfo.cur_time = 0;
1294 tick = 0;
1295 while (++tick < 195000) {
1296 status = tg3_fiber_aneg_smachine(tp, &aninfo);
1297 if (status == ANEG_DONE ||
1298 status == ANEG_FAILED)
1299 break;
1301 udelay(1);
1304 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1305 tw32_carefully(MAC_MODE, tp->mac_mode);
1307 if (status == ANEG_DONE &&
1308 (aninfo.flags &
1309 (MR_AN_COMPLETE | MR_LINK_OK |
1310 MR_LP_ADV_FULL_DUPLEX))) {
1311 uint32_t local_adv, remote_adv;
1313 local_adv = ADVERTISE_PAUSE_CAP;
1314 remote_adv = 0;
1315 if (aninfo.flags & MR_LP_ADV_SYM_PAUSE)
1316 remote_adv |= LPA_PAUSE_CAP;
1317 if (aninfo.flags & MR_LP_ADV_ASYM_PAUSE)
1318 remote_adv |= LPA_PAUSE_ASYM;
1320 tg3_setup_flow_control(tp, local_adv, remote_adv);
1322 tp->tg3_flags |=
1323 TG3_FLAG_GOT_SERDES_FLOWCTL;
1324 current_link_up = 1;
1326 for (i = 0; i < 60; i++) {
1327 udelay(20);
1328 tw32_carefully(MAC_STATUS,
1329 (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED));
1330 if ((tr32(MAC_STATUS) &
1331 (MAC_STATUS_SYNC_CHANGED |
1332 MAC_STATUS_CFG_CHANGED)) == 0)
1333 break;
1335 if (current_link_up == 0 &&
1336 (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
1337 current_link_up = 1;
1339 } else {
1340 /* Forcing 1000FD link up. */
1341 current_link_up = 1;
1345 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1346 tw32_carefully(MAC_MODE, tp->mac_mode);
1348 tp->hw_status->status =
1349 (SD_STATUS_UPDATED |
1350 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
1352 for (i = 0; i < 100; i++) {
1353 udelay(20);
1354 tw32_carefully(MAC_STATUS,
1355 (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED));
1356 if ((tr32(MAC_STATUS) &
1357 (MAC_STATUS_SYNC_CHANGED |
1358 MAC_STATUS_CFG_CHANGED)) == 0)
1359 break;
1362 if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
1363 current_link_up = 0;
1365 if (current_link_up == 1) {
1366 tp->link_config.active_speed = SPEED_1000;
1367 tp->link_config.active_duplex = DUPLEX_FULL;
1368 } else {
1369 tp->link_config.active_speed = SPEED_INVALID;
1370 tp->link_config.active_duplex = DUPLEX_INVALID;
1373 if (current_link_up != tp->carrier_ok) {
1374 tp->carrier_ok = current_link_up;
1375 tg3_link_report(tp);
1376 } else {
1377 uint32_t now_pause_cfg =
1378 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
1379 TG3_FLAG_TX_PAUSE);
1380 if (orig_pause_cfg != now_pause_cfg ||
1381 orig_active_speed != tp->link_config.active_speed ||
1382 orig_active_duplex != tp->link_config.active_duplex)
1383 tg3_link_report(tp);
1386 if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
1387 tw32_carefully(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
1388 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
1389 tw32_carefully(MAC_MODE, tp->mac_mode);
1393 return 0;
1395 #else
1396 #define tg3_setup_fiber_phy(TP) (-EINVAL)
1397 #endif /* SUPPORT_FIBER_PHY */
1399 static int tg3_setup_phy(struct tg3 *tp)
1401 int err;
1403 if (tp->phy_id == PHY_ID_SERDES) {
1404 err = tg3_setup_fiber_phy(tp);
1405 } else {
1406 err = tg3_setup_copper_phy(tp);
1409 if (tp->link_config.active_speed == SPEED_1000 &&
1410 tp->link_config.active_duplex == DUPLEX_HALF)
1411 tw32(MAC_TX_LENGTHS,
1412 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1413 (6 << TX_LENGTHS_IPG_SHIFT) |
1414 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1415 else
1416 tw32(MAC_TX_LENGTHS,
1417 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1418 (6 << TX_LENGTHS_IPG_SHIFT) |
1419 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1421 return err;
1425 #define MAX_WAIT_CNT 1000
1427 /* To stop a block, clear the enable bit and poll till it
1428 * clears.
1430 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, uint32_t enable_bit)
1432 unsigned int i;
1433 uint32_t val;
1435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1436 switch(ofs) {
1437 case RCVLSC_MODE:
1438 case DMAC_MODE:
1439 case MBFREE_MODE:
1440 case BUFMGR_MODE:
1441 case MEMARB_MODE:
1442 /* We can't enable/disable these bits of the
1443 * 5705, just say success.
1445 return 0;
1446 default:
1447 break;
1450 val = tr32(ofs);
1451 val &= ~enable_bit;
1452 tw32(ofs, val);
1453 tr32(ofs);
1455 for (i = 0; i < MAX_WAIT_CNT; i++) {
1456 udelay(100);
1457 val = tr32(ofs);
1458 if ((val & enable_bit) == 0)
1459 break;
1462 if (i == MAX_WAIT_CNT) {
1463 printf( "tg3_stop_block timed out, ofs=%#lx enable_bit=%3lx\n",
1464 ofs, enable_bit );
1465 return -ENODEV;
1468 return 0;
1471 static int tg3_abort_hw(struct tg3 *tp)
1473 int i, err;
1475 tg3_disable_ints(tp);
1477 tp->rx_mode &= ~RX_MODE_ENABLE;
1478 tw32_carefully(MAC_RX_MODE, tp->rx_mode);
1480 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
1481 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
1482 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
1483 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
1484 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
1485 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
1487 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
1488 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
1489 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1490 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
1491 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
1492 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
1493 if (err)
1494 goto out;
1496 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
1497 tw32_carefully(MAC_MODE, tp->mac_mode);
1499 tp->tx_mode &= ~TX_MODE_ENABLE;
1500 tw32_carefully(MAC_TX_MODE, tp->tx_mode);
1502 for (i = 0; i < MAX_WAIT_CNT; i++) {
1503 udelay(100);
1504 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
1505 break;
1507 if (i >= MAX_WAIT_CNT) {
1508 printf("tg3_abort_hw timed out TX_MODE_ENABLE will not clear MAC_TX_MODE=%x\n",
1509 (unsigned int) tr32(MAC_TX_MODE));
1510 return -ENODEV;
1513 err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
1514 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
1515 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
1517 tw32(FTQ_RESET, 0xffffffff);
1518 tw32(FTQ_RESET, 0x00000000);
1520 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
1521 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
1522 if (err)
1523 goto out;
1525 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
1527 out:
1528 return err;
1531 static void tg3_chip_reset(struct tg3 *tp)
1533 uint32_t val;
1535 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
1536 /* Force NVRAM to settle.
1537 * This deals with a chip bug which can result in EEPROM
1538 * corruption.
1540 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
1541 int i;
1543 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
1544 for (i = 0; i < 100000; i++) {
1545 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
1546 break;
1547 udelay(10);
1551 /* In Etherboot we don't need to worry about the 5701
1552 * REG_WRITE_BUG because we do all register writes indirectly.
1555 // Alf: here patched
1556 /* do the reset */
1557 val = GRC_MISC_CFG_CORECLK_RESET;
1558 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
1559 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1560 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
1563 // Alf : Please VALIDATE THIS.
1564 // It is necessary in my case (5751) to prevent a reboot, but
1565 // I have no idea about a side effect on any other version.
1566 // It appears to be what's done in tigon3.c from Broadcom
1567 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
1568 tw32(GRC_MISC_CFG, 0x20000000) ;
1569 val |= 0x20000000 ;
1572 tw32(GRC_MISC_CFG, val);
1574 /* Flush PCI posted writes. The normal MMIO registers
1575 * are inaccessible at this time so this is the only
1576 * way to do this reliably. I tried to use indirect
1577 * register read/write but this upset some 5701 variants.
1579 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
1581 udelay(120);
1583 /* Re-enable indirect register accesses. */
1584 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
1585 tp->misc_host_ctrl);
1587 /* Set MAX PCI retry to zero. */
1588 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
1589 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1590 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
1591 val |= PCISTATE_RETRY_SAME_DMA;
1592 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
1594 pci_restore_state(tp->pdev, tp->pci_cfg_state);
1596 /* Make sure PCI-X relaxed ordering bit is clear. */
1597 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
1598 val &= ~PCIX_CAPS_RELAXED_ORDERING;
1599 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
1601 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
1603 if (((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0) &&
1604 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
1605 tp->pci_clock_ctrl |=
1606 (CLOCK_CTRL_FORCE_CLKRUN | CLOCK_CTRL_CLKRUN_OENABLE);
1607 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
1610 tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
1613 static void tg3_stop_fw(struct tg3 *tp)
1615 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
1616 uint32_t val;
1617 int i;
1619 tg3_write_mem(NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1620 val = tr32(GRC_RX_CPU_EVENT);
1621 val |= (1 << 14);
1622 tw32(GRC_RX_CPU_EVENT, val);
1624 /* Wait for RX cpu to ACK the event. */
1625 for (i = 0; i < 100; i++) {
1626 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
1627 break;
1628 udelay(1);
1633 static int tg3_restart_fw(struct tg3 *tp, uint32_t state)
1635 uint32_t val;
1636 int i;
1638 tg3_write_mem(NIC_SRAM_FIRMWARE_MBOX,
1639 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1640 /* Wait for firmware initialization to complete. */
1641 for (i = 0; i < 100000; i++) {
1642 tg3_read_mem(NIC_SRAM_FIRMWARE_MBOX, &val);
1643 if (val == (uint32_t) ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1644 break;
1645 udelay(10);
1647 if (i >= 100000 &&
1648 !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
1649 printf ( "Firmware will not restart magic=%#lx\n",
1650 val );
1651 return -ENODEV;
1653 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1654 state = DRV_STATE_SUSPEND;
1657 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
1658 (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)) {
1659 // Enable PCIE bug fix
1660 tg3_read_mem(0x7c00, &val);
1661 tg3_write_mem(0x7c00, val | 0x02000000);
1663 tg3_write_mem(NIC_SRAM_FW_DRV_STATE_MBOX, state);
1664 return 0;
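/* Full shutdown: pause any ASF firmware, stop the DMA/state-machine
 * blocks, hard-reset the chip, and then let the firmware come back up
 * in the "driver unloading" state.
 */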
1667 static int tg3_halt(struct tg3 *tp)
1669 tg3_stop_fw(tp);
1670 tg3_abort_hw(tp);
1671 tg3_chip_reset(tp);
1672 return tg3_restart_fw(tp, DRV_STATE_UNLOAD);
1675 static void __tg3_set_mac_addr(struct tg3 *tp)
1677 uint32_t addr_high, addr_low;
1678 int i;
1680 addr_high = ((tp->nic->node_addr[0] << 8) |
1681 tp->nic->node_addr[1]);
1682 addr_low = ((tp->nic->node_addr[2] << 24) |
1683 (tp->nic->node_addr[3] << 16) |
1684 (tp->nic->node_addr[4] << 8) |
1685 (tp->nic->node_addr[5] << 0));
1686 for (i = 0; i < 4; i++) {
1687 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
1688 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
1691 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
1692 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
1693 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)) {
1694 for(i = 0; i < 12; i++) {
1695 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
1696 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
1699 addr_high = (tp->nic->node_addr[0] +
1700 tp->nic->node_addr[1] +
1701 tp->nic->node_addr[2] +
1702 tp->nic->node_addr[3] +
1703 tp->nic->node_addr[4] +
1704 tp->nic->node_addr[5]) &
1705 TX_BACKOFF_SEED_MASK;
1706 tw32(MAC_TX_BACKOFF_SEED, addr_high);
1709 static void tg3_set_bdinfo(struct tg3 *tp, uint32_t bdinfo_addr,
1710 dma_addr_t mapping, uint32_t maxlen_flags,
1711 uint32_t nic_addr)
1713 tg3_write_mem((bdinfo_addr +
1714 TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
1715 ((uint64_t) mapping >> 32));
1716 tg3_write_mem((bdinfo_addr +
1717 TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
1718 ((uint64_t) mapping & 0xffffffff));
1719 tg3_write_mem((bdinfo_addr +
1720 TG3_BDINFO_MAXLEN_FLAGS),
1721 maxlen_flags);
1722 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1723 tg3_write_mem((bdinfo_addr + TG3_BDINFO_NIC_ADDR), nic_addr);
1728 static void tg3_init_rings(struct tg3 *tp)
1730 unsigned i;
1732 /* Zero out the tg3 variables */
1733 memset(&tg3_bss, 0, sizeof(tg3_bss));
1734 tp->rx_std = &tg3_bss.rx_std[0];
1735 tp->rx_rcb = &tg3_bss.rx_rcb[0];
1736 tp->tx_ring = &tg3_bss.tx_ring[0];
1737 tp->hw_status = &tg3_bss.hw_status;
1738 tp->hw_stats = &tg3_bss.hw_stats;
1739 tp->mac_mode = 0;
1742 /* Initialize tx/rx rings for packet processing.
1744 * The chip has been shut down and the driver detached from
1745 * the networking, so no interrupts or new tx packets will
1746 * end up in the driver.
1749 /* Initialize invariants of the rings, we only set this
1750 * stuff once. This works because the card does not
1751 * write into the rx buffer posting rings.
1753 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
1754 struct tg3_rx_buffer_desc *rxd;
1756 rxd = &tp->rx_std[i];
1757 rxd->idx_len = (RX_PKT_BUF_SZ - 2 - 64) << RXD_LEN_SHIFT;
1758 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
1759 rxd->opaque = (RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT));
1761 /* Note where the receive buffer for the ring is placed */
1762 rxd->addr_hi = 0;
1763 rxd->addr_lo = virt_to_bus(
1764 &tg3_bss.rx_bufs[i%TG3_DEF_RX_RING_PENDING][2]);
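/* Helper for the one-time register setup in tg3_setup_hw() below: walk
 * a flat array of (register, value) pairs and write each with tw32().
 */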
1768 #define TG3_WRITE_SETTINGS(TABLE) \
1769 do { \
1770 const uint32_t *_table, *_end; \
1771 _table = TABLE; \
1772 _end = _table + sizeof(TABLE)/sizeof(TABLE[0]); \
1773 for(; _table < _end; _table += 2) { \
1774 tw32(_table[0], _table[1]); \
1776 } while(0)
1779 /* initialize/reset the tg3 */
1780 static int tg3_setup_hw(struct tg3 *tp)
1782 uint32_t val, rdmac_mode;
1783 int i, err, limit;
1785 /* Simply don't support setups with extremely buggy firmware in etherboot */
1786 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
1787 printf("Error 5701_A0 firmware bug detected\n");
1788 return -EINVAL;
1791 tg3_disable_ints(tp);
1793 /* Originally this was all in tg3_init_hw */
1795 /* Force the chip into D0. */
1796 tg3_set_power_state_0(tp);
1798 tg3_switch_clocks(tp);
1800 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
1802 // This should go somewhere else
1803 #define T3_PCIE_CAPABILITY_ID_REG 0xD0
1804 #define T3_PCIE_CAPABILITY_ID 0x10
1805 #define T3_PCIE_CAPABILITY_REG 0xD2
1807 /* Originally this was all in tg3_reset_hw */
1809 tg3_stop_fw(tp);
1811 /* No need to call tg3_abort_hw here, it is called before tg3_setup_hw. */
1813 tg3_chip_reset(tp);
1815 tw32(GRC_MODE, tp->grc_mode); /* Redundant? */
1817 err = tg3_restart_fw(tp, DRV_STATE_START);
1818 if (err)
1819 return err;
1821 if (tp->phy_id == PHY_ID_SERDES) {
1822 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
1824 tw32_carefully(MAC_MODE, tp->mac_mode);
1827 /* This works around an issue with Athlon chipsets on
1828 * B3 tigon3 silicon. This bit has no effect on any
1829 * other revision.
1830 * Alf: Except 5750 ! (which reboots)
1833 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
1834 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
1835 tw32_carefully(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
1837 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1838 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
1839 val = tr32(TG3PCI_PCISTATE);
1840 val |= PCISTATE_RETRY_SAME_DMA;
1841 tw32(TG3PCI_PCISTATE, val);
1844 /* Descriptor ring init may make accesses to the
1845 * NIC SRAM area to setup the TX descriptors, so we
1846 * can only do this after the hardware has been
1847 * successfully reset.
1849 tg3_init_rings(tp);
1851 /* Clear statistics/status block in chip */
1852 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1853 for (i = NIC_SRAM_STATS_BLK;
1854 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
1855 i += sizeof(uint32_t)) {
1856 tg3_write_mem(i, 0);
1857 udelay(40);
1861 /* This value is determined during the probe time DMA
1862 * engine test, tg3_setup_dma.
1864 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
1866 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
1867 GRC_MODE_4X_NIC_SEND_RINGS |
1868 GRC_MODE_NO_TX_PHDR_CSUM |
1869 GRC_MODE_NO_RX_PHDR_CSUM);
1870 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
1871 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1872 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
1874 tw32(GRC_MODE,
1875 tp->grc_mode |
1876 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
1878 /* Set up the timer prescaler register. The clock is always 66 MHz. */
1879 tw32(GRC_MISC_CFG,
1880 (65 << GRC_MISC_CFG_PRESCALAR_SHIFT));
1882 /* Initialize MBUF/DESC pool. */
1883 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1884 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
1885 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
1886 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
1887 else
1888 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
1889 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
1890 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
1892 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
1893 tw32(BUFMGR_MB_RDMA_LOW_WATER,
1894 tp->bufmgr_config.mbuf_read_dma_low_water);
1895 tw32(BUFMGR_MB_MACRX_LOW_WATER,
1896 tp->bufmgr_config.mbuf_mac_rx_low_water);
1897 tw32(BUFMGR_MB_HIGH_WATER,
1898 tp->bufmgr_config.mbuf_high_water);
1899 } else {
1900 tw32(BUFMGR_MB_RDMA_LOW_WATER,
1901 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
1902 tw32(BUFMGR_MB_MACRX_LOW_WATER,
1903 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
1904 tw32(BUFMGR_MB_HIGH_WATER,
1905 tp->bufmgr_config.mbuf_high_water_jumbo);
1907 tw32(BUFMGR_DMA_LOW_WATER,
1908 tp->bufmgr_config.dma_low_water);
1909 tw32(BUFMGR_DMA_HIGH_WATER,
1910 tp->bufmgr_config.dma_high_water);
1912 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
1913 for (i = 0; i < 2000; i++) {
1914 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
1915 break;
1916 udelay(10);
1918 if (i >= 2000) {
1919 printf("tg3_setup_hw cannot enable BUFMGR\n");
1920 return -ENODEV;
1923 tw32(FTQ_RESET, 0xffffffff);
1924 tw32(FTQ_RESET, 0x00000000);
1925 for (i = 0; i < 2000; i++) {
1926 if (tr32(FTQ_RESET) == 0x00000000)
1927 break;
1928 udelay(10);
1930 if (i >= 2000) {
1931 printf("tg3_setup_hw cannot reset FTQ\n");
1932 return -ENODEV;
1935 /* Initialize TG3_BDINFO's at:
1936 * RCVDBDI_STD_BD: standard eth size rx ring
1937 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
1938 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
1940 * like so:
1941 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
1942 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
1943 * ring attribute flags
1944 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
1946 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
1947 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
1949 * ??? No space allocated for mini receive ring? :(
1951 * The size of each ring is fixed in the firmware, but the location is
1952 * configurable.
1955 static const uint32_t table_all[] = {
1956 /* Setup replenish thresholds. */
1957 RCVBDI_STD_THRESH, TG3_DEF_RX_RING_PENDING / 8,
1959 /* Etherboot lives below 4GB */
1960 RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
1961 RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_BUFFER_DESC,
1963 static const uint32_t table_not_5705[] = {
1964 /* Buffer maximum length */
1965 RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
1967 /* Disable the mini frame rx ring */
1968 RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED,
1970 /* Disable the jumbo frame rx ring */
1971 RCVBDI_JUMBO_THRESH, 0,
1972 RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED,
1976 TG3_WRITE_SETTINGS(table_all);
1977 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
1978 virt_to_bus(tp->rx_std));
1979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1980 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
1981 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
1982 } else {
1983 TG3_WRITE_SETTINGS(table_not_5705);
1988 /* There is only one send ring on 5705, no need to explicitly
1989 * disable the others.
1991 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1992 /* Clear out send RCB ring in SRAM. */
1993 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
1994 tg3_write_mem(i + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED);
1997 tp->tx_prod = 0;
1998 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
1999 tw32_mailbox2(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
2001 tg3_set_bdinfo(tp,
2002 NIC_SRAM_SEND_RCB,
2003 virt_to_bus(tp->tx_ring),
2004 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
2005 NIC_SRAM_TX_BUFFER_DESC);
2007 /* There is only one receive return ring on 5705, no need to explicitly
2008 * disable the others.
2010 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
2011 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK; i += TG3_BDINFO_SIZE) {
2012 tg3_write_mem(i + TG3_BDINFO_MAXLEN_FLAGS,
2013 BDINFO_FLAGS_DISABLED);
2017 tp->rx_rcb_ptr = 0;
2018 tw32_mailbox2(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
2020 tg3_set_bdinfo(tp,
2021 NIC_SRAM_RCV_RET_RCB,
2022 virt_to_bus(tp->rx_rcb),
2023 (TG3_RX_RCB_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
2026 tp->rx_std_ptr = TG3_DEF_RX_RING_PENDING;
2027 tw32_mailbox2(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2028 tp->rx_std_ptr);
2030 tw32_mailbox2(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 0);
2032 /* Initialize MAC address and backoff seed. */
2033 __tg3_set_mac_addr(tp);
2035 /* Calculate RDMAC_MODE setting early, we need it to determine
2036 * the RCVLPC_STATE_ENABLE mask.
2038 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
2039 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
2040 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
2041 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
2042 RDMAC_MODE_LNGREAD_ENAB);
2043 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
2044 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
2045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2046 if (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
2047 if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
2048 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
2049 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2054 /* Setup host coalescing engine. */
2055 tw32(HOSTCC_MODE, 0);
2056 for (i = 0; i < 2000; i++) {
2057 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
2058 break;
2059 udelay(10);
2062 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
2063 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
2064 tw32_carefully(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
2066 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
2067 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
2068 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
2069 GRC_LCLCTRL_GPIO_OUTPUT1);
2070 tw32_carefully(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2072 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
2073 tr32(MAILBOX_INTERRUPT_0);
2075 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
2076 tw32_carefully(DMAC_MODE, DMAC_MODE_ENABLE);
2079 val = ( WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
2080 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
2081 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
2082 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
2083 WDMAC_MODE_LNGREAD_ENAB);
2084 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) &&
2085 ((tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) != 0) &&
2086 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
2087 val |= WDMAC_MODE_RX_ACCEL;
2089 tw32_carefully(WDMAC_MODE, val);
2091 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
2092 val = tr32(TG3PCI_X_CAPS);
2093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
2094 val &= PCIX_CAPS_BURST_MASK;
2095 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
2096 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2097 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
2098 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
2099 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
2100 val |= (tp->split_mode_max_reqs <<
2101 PCIX_CAPS_SPLIT_SHIFT);
2103 tw32(TG3PCI_X_CAPS, val);
2106 tw32_carefully(RDMAC_MODE, rdmac_mode);
2108 static const uint32_t table_all[] = {
2109 /* MTU + ethernet header + FCS + optional VLAN tag */
2110 MAC_RX_MTU_SIZE, ETH_MAX_MTU + ETH_HLEN + 8,
2112 /* The slot time is changed by tg3_setup_phy if we
2113 * run at gigabit with half duplex.
2115 MAC_TX_LENGTHS,
2116 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2117 (6 << TX_LENGTHS_IPG_SHIFT) |
2118 (32 << TX_LENGTHS_SLOT_TIME_SHIFT),
2120 /* Receive rules. */
2121 MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS,
2122 RCVLPC_CONFIG, 0x0181,
2124 /* Receive/send statistics. */
2125 RCVLPC_STATS_ENABLE, 0xffffff,
2126 RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE,
2127 SNDDATAI_STATSENAB, 0xffffff,
2128 SNDDATAI_STATSCTRL, (SNDDATAI_SCTRL_ENABLE |SNDDATAI_SCTRL_FASTUPD),
2130 /* Host coalescing engine */
2131 HOSTCC_RXCOL_TICKS, 0,
2132 HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS,
2133 HOSTCC_RXMAX_FRAMES, 1,
2134 HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES,
2135 HOSTCC_RXCOAL_MAXF_INT, 1,
2136 HOSTCC_TXCOAL_MAXF_INT, 0,
2138 /* Status/statistics block address. */
2139 /* Etherboot lives below 4GB, so HIGH == 0 */
2140 HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
2142 /* No need to enable 32byte coalesce mode. */
2143 HOSTCC_MODE, HOSTCC_MODE_ENABLE | 0,
2145 RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE,
2146 RCVLPC_MODE, RCVLPC_MODE_ENABLE,
2148 RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE,
2150 SNDDATAC_MODE, SNDDATAC_MODE_ENABLE,
2151 SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE,
2152 RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB,
2153 RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ,
2154 SNDDATAI_MODE, SNDDATAI_MODE_ENABLE,
2155 SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE,
2156 SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE,
2158 /* Accept all multicast frames. */
2159 MAC_HASH_REG_0, 0xffffffff,
2160 MAC_HASH_REG_1, 0xffffffff,
2161 MAC_HASH_REG_2, 0xffffffff,
2162 MAC_HASH_REG_3, 0xffffffff,
2164 static const uint32_t table_not_5705[] = {
2165 /* Host coalescing engine */
2166 HOSTCC_RXCOAL_TICK_INT, 0,
2167 HOSTCC_TXCOAL_TICK_INT, 0,
2169 /* Status/statistics block address. */
2170 /* Etherboot lives below 4GB, so HIGH == 0 */
2171 HOSTCC_STAT_COAL_TICKS, DEFAULT_STAT_COAL_TICKS,
2172 HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
2173 HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK,
2174 HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK,
2176 RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE,
2178 MBFREE_MODE, MBFREE_MODE_ENABLE,
2180 TG3_WRITE_SETTINGS(table_all);
2181 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
2182 virt_to_bus(tp->hw_stats));
2183 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
2184 virt_to_bus(tp->hw_status));
2185 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
2186 TG3_WRITE_SETTINGS(table_not_5705);
2190 tp->tx_mode = TX_MODE_ENABLE;
2191 tw32_carefully(MAC_TX_MODE, tp->tx_mode);
2193 tp->rx_mode = RX_MODE_ENABLE;
2194 tw32_carefully(MAC_RX_MODE, tp->rx_mode);
2196 tp->mi_mode = MAC_MI_MODE_BASE;
2197 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
2199 tw32(MAC_LED_CTRL, 0);
2200 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2201 if (tp->phy_id == PHY_ID_SERDES) {
2202 tw32_carefully(MAC_RX_MODE, RX_MODE_RESET);
2204 tp->rx_mode |= RX_MODE_KEEP_VLAN_TAG; /* drop tagged vlan packets */
2205 tw32_carefully(MAC_RX_MODE, tp->rx_mode);
2207 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
2208 tw32(MAC_SERDES_CFG, 0x616000);
2210 /* Prevent chip from dropping frames when flow control
2211 * is enabled.
2213 tw32(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
2214 tr32(MAC_LOW_WMARK_MAX_RX_FRAME);
2216 err = tg3_setup_phy(tp);
2218 /* Ignore CRC stats */
2220 /* Initialize receive rules. */
2221 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
2222 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
2223 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
2224 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
2226 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
2227 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750))
2228 limit = 8;
2229 else
2230 limit = 16;
2231 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
2232 limit -= 4;
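/* Deliberate fall-through: starting at the computed limit, each case
 * clears one receive rule and drops into the next, so every rule above
 * the ones programmed here is disabled.  Rules 0 and 1 were set up above;
 * rules 2 and 3 are left alone (see the commented-out cases below).
 */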
2233 switch (limit) {
2234 case 16: tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
2235 case 15: tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
2236 case 14: tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
2237 case 13: tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
2238 case 12: tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
2239 case 11: tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
2240 case 10: tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
2241 case 9: tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
2242 case 8: tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
2243 case 7: tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
2244 case 6: tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
2245 case 5: tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
2246 case 4: /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
2247 case 3: /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
2248 case 2:
2249 case 1:
2250 default:
2251 break;
2254 return err;
2259 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
2260 static void tg3_nvram_init(struct tg3 *tp)
2262 tw32(GRC_EEPROM_ADDR,
2263 (EEPROM_ADDR_FSM_RESET |
2264 (EEPROM_DEFAULT_CLOCK_PERIOD <<
2265 EEPROM_ADDR_CLKPERD_SHIFT)));
2267 mdelay(1);
2269 /* Enable seeprom accesses. */
2270 tw32_carefully(GRC_LOCAL_CTRL,
2271 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
2273 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2274 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2275 uint32_t nvcfg1 = tr32(NVRAM_CFG1);
2277 tp->tg3_flags |= TG3_FLAG_NVRAM;
2278 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
2279 if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
2280 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
2281 } else {
2282 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
2283 tw32(NVRAM_CFG1, nvcfg1);
2286 } else {
2287 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
2292 static int tg3_nvram_read_using_eeprom(
2293 struct tg3 *tp __unused, uint32_t offset, uint32_t *val)
2295 uint32_t tmp;
2296 int i;
2298 if (offset > EEPROM_ADDR_ADDR_MASK ||
2299 (offset % 4) != 0) {
2300 return -EINVAL;
2303 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2304 EEPROM_ADDR_DEVID_MASK |
2305 EEPROM_ADDR_READ);
2306 tw32(GRC_EEPROM_ADDR,
2307 tmp |
2308 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2309 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2310 EEPROM_ADDR_ADDR_MASK) |
2311 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2313 for (i = 0; i < 10000; i++) {
2314 tmp = tr32(GRC_EEPROM_ADDR);
2316 if (tmp & EEPROM_ADDR_COMPLETE)
2317 break;
2318 udelay(100);
2320 if (!(tmp & EEPROM_ADDR_COMPLETE)) {
2321 return -EBUSY;
2324 *val = tr32(GRC_EEPROM_DATA);
2325 return 0;
2328 static int tg3_nvram_read(struct tg3 *tp, uint32_t offset, uint32_t *val)
2330 int i, saw_done_clear;
2332 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2333 return tg3_nvram_read_using_eeprom(tp, offset, val);
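/* For buffered-flash parts the linear offset is folded into a
 * (page, byte-in-page) pair below.  As a worked example, assuming
 * NVRAM_BUFFERED_PAGE_SIZE is 264 and NVRAM_BUFFERED_PAGE_POS is 9
 * (values not visible in this file), offset 300 becomes page 1,
 * byte 36, i.e. (1 << 9) + 36 = 548.
 */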
2335 if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
2336 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
2337 NVRAM_BUFFERED_PAGE_POS) +
2338 (offset % NVRAM_BUFFERED_PAGE_SIZE);
2340 if (offset > NVRAM_ADDR_MSK)
2341 return -EINVAL;
2343 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2344 for (i = 0; i < 1000; i++) {
2345 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2346 break;
2347 udelay(20);
2350 tw32(NVRAM_ADDR, offset);
2351 tw32(NVRAM_CMD,
2352 NVRAM_CMD_RD | NVRAM_CMD_GO |
2353 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2355 /* Wait for done bit to clear then set again. */
2356 saw_done_clear = 0;
2357 for (i = 0; i < 1000; i++) {
2358 udelay(10);
2359 if (!saw_done_clear &&
2360 !(tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
2361 saw_done_clear = 1;
2362 else if (saw_done_clear &&
2363 (tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
2364 break;
2366 if (i >= 1000) {
2367 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2368 return -EBUSY;
2371 *val = bswap_32(tr32(NVRAM_RDDATA));
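/* Release the software arbitration grant taken above; the literal 0x20
 * presumably matches SWARB_REQ_CLR1 (the named constant used on the
 * error path).
 */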
2372 tw32(NVRAM_SWARB, 0x20);
2374 return 0;
2377 struct subsys_tbl_ent {
2378 uint16_t subsys_vendor, subsys_devid;
2379 uint32_t phy_id;
2382 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
2383 /* Broadcom boards. */
2384 { 0x14e4, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
2385 { 0x14e4, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
2386 { 0x14e4, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
2387 { 0x14e4, 0x0003, PHY_ID_SERDES }, /* BCM95700A9 */
2388 { 0x14e4, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
2389 { 0x14e4, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
2390 { 0x14e4, 0x0007, PHY_ID_SERDES }, /* BCM95701A7 */
2391 { 0x14e4, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
2392 { 0x14e4, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
2393 { 0x14e4, 0x0009, PHY_ID_BCM5701 }, /* BCM95703Ax1 */
2394 { 0x14e4, 0x8009, PHY_ID_BCM5701 }, /* BCM95703Ax2 */
2396 /* 3com boards. */
2397 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
2398 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
2399 /* { PCI_VENDOR_ID_3COM, 0x1002, PHY_ID_XXX }, 3C996CT */
2400 /* { PCI_VENDOR_ID_3COM, 0x1003, PHY_ID_XXX }, 3C997T */
2401 { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES }, /* 3C996SX */
2402 /* { PCI_VENDOR_ID_3COM, 0x1005, PHY_ID_XXX }, 3C997SZ */
2403 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
2404 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
2406 /* DELL boards. */
2407 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
2408 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
2409 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
2410 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
2411 { PCI_VENDOR_ID_DELL, 0x0179, PHY_ID_BCM5751 }, /* EtherXpress */
2413 /* Fujitsu Siemens Computer */
2414 { PCI_VENDOR_ID_FSC, 0x105d, PHY_ID_BCM5751 }, /* Futro C200 */
2416 /* Compaq boards. */
2417 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
2418 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
2419 { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES }, /* CHANGELING */
2420 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
2421 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 } /* NC7780_2 */
2424 static int tg3_phy_probe(struct tg3 *tp)
2426 uint32_t eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
2427 uint32_t hw_phy_id, hw_phy_id_masked;
2428 enum phy_led_mode eeprom_led_mode;
2429 uint32_t val;
2430 unsigned i;
2431 int eeprom_signature_found, err;
2433 tp->phy_id = PHY_ID_INVALID;
2435 for (i = 0; i < sizeof(subsys_id_to_phy_id)/sizeof(subsys_id_to_phy_id[0]); i++) {
2436 if ((subsys_id_to_phy_id[i].subsys_vendor == tp->subsystem_vendor) &&
2437 (subsys_id_to_phy_id[i].subsys_devid == tp->subsystem_device)) {
2438 tp->phy_id = subsys_id_to_phy_id[i].phy_id;
2439 break;
2443 eeprom_phy_id = PHY_ID_INVALID;
2444 eeprom_led_mode = led_mode_auto;
2445 eeprom_signature_found = 0;
2446 tg3_read_mem(NIC_SRAM_DATA_SIG, &val);
2447 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
2448 uint32_t nic_cfg;
2450 tg3_read_mem(NIC_SRAM_DATA_CFG, &nic_cfg);
2451 tp->nic_sram_data_cfg = nic_cfg;
2453 eeprom_signature_found = 1;
2455 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
2456 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
2457 eeprom_phy_id = PHY_ID_SERDES;
2458 } else {
2459 uint32_t nic_phy_id;
2461 tg3_read_mem(NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
2462 if (nic_phy_id != 0) {
2463 uint32_t id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
2464 uint32_t id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
2466 eeprom_phy_id = (id1 >> 16) << 10;
2467 eeprom_phy_id |= (id2 & 0xfc00) << 16;
2468 eeprom_phy_id |= (id2 & 0x03ff) << 0;
2472 switch (nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK) {
2473 case NIC_SRAM_DATA_CFG_LED_TRIPLE_SPD:
2474 eeprom_led_mode = led_mode_three_link;
2475 break;
2477 case NIC_SRAM_DATA_CFG_LED_LINK_SPD:
2478 eeprom_led_mode = led_mode_link10;
2479 break;
2481 default:
2482 eeprom_led_mode = led_mode_auto;
2483 break;
2485 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
2486 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
2487 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
2488 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)) {
2489 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
2492 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE)
2493 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
2494 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
2495 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
2498 /* Now read the physical PHY_ID from the chip and verify
2499 * that it is sane. If it doesn't look good, we fall back
2500 * to the hard-coded table based PHY_ID, and failing
2501 * that, the value found in the eeprom area.
2503 err = tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
2504 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
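/* Pack MII PHYSID1/PHYSID2 into the driver's internal PHY_ID_* encoding,
 * using the same bit layout as the NIC_SRAM_DATA_PHY_ID path above, so
 * the result can be compared against KNOWN_PHY_ID below.
 */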
2506 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
2507 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
2508 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
2510 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
2512 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
2513 tp->phy_id = hw_phy_id;
2514 } else {
2515 /* phy_id currently holds the value found in the
2516 * subsys_id_to_phy_id[] table or PHY_ID_INVALID
2517 * if a match was not found there.
2519 if (tp->phy_id == PHY_ID_INVALID) {
2520 if (!eeprom_signature_found ||
2521 !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
2522 return -ENODEV;
2523 tp->phy_id = eeprom_phy_id;
2527 err = tg3_phy_reset(tp);
2528 if (err)
2529 return err;
2531 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2532 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2533 uint32_t mii_tg3_ctrl;
2535 /* These chips, when reset, only advertise 10Mb
2536 * capabilities. Fix that.
2538 err = tg3_writephy(tp, MII_ADVERTISE,
2539 (ADVERTISE_CSMA |
2540 ADVERTISE_PAUSE_CAP |
2541 ADVERTISE_10HALF |
2542 ADVERTISE_10FULL |
2543 ADVERTISE_100HALF |
2544 ADVERTISE_100FULL));
2545 mii_tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
2546 MII_TG3_CTRL_ADV_1000_FULL |
2547 MII_TG3_CTRL_AS_MASTER |
2548 MII_TG3_CTRL_ENABLE_AS_MASTER);
2549 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2550 mii_tg3_ctrl = 0;
2552 err |= tg3_writephy(tp, MII_TG3_CTRL, mii_tg3_ctrl);
2553 err |= tg3_writephy(tp, MII_BMCR,
2554 (BMCR_ANRESTART | BMCR_ANENABLE));
2557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
2558 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2559 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2560 tg3_writedsp(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
2563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2564 tg3_writephy(tp, 0x1c, 0x8d68);
2565 tg3_writephy(tp, 0x1c, 0x8d68);
2568 /* Enable Ethernet@WireSpeed */
2569 tg3_phy_set_wirespeed(tp);
2571 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
2572 err = tg3_init_5401phy_dsp(tp);
2575 /* Determine the PHY led mode.
2576 * Be careful: if this gets set wrong it can result in an inability to
2577 * establish a link.
2579 if (tp->phy_id == PHY_ID_SERDES) {
2580 tp->led_mode = led_mode_three_link;
2582 else if (tp->subsystem_vendor == PCI_VENDOR_ID_DELL) {
2583 tp->led_mode = led_mode_link10;
2584 } else {
2585 tp->led_mode = led_mode_three_link;
2586 if (eeprom_signature_found &&
2587 eeprom_led_mode != led_mode_auto)
2588 tp->led_mode = eeprom_led_mode;
2591 if (tp->phy_id == PHY_ID_SERDES)
2592 tp->link_config.advertising =
2593 (ADVERTISED_1000baseT_Half |
2594 ADVERTISED_1000baseT_Full |
2595 ADVERTISED_Autoneg |
2596 ADVERTISED_FIBRE);
2597 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2598 tp->link_config.advertising &=
2599 ~(ADVERTISED_1000baseT_Half |
2600 ADVERTISED_1000baseT_Full);
2602 return err;
2605 #if SUPPORT_PARTNO_STR
2606 static void tg3_read_partno(struct tg3 *tp)
2608 unsigned char vpd_data[256];
2609 int i;
2611 for (i = 0; i < 256; i += 4) {
2612 uint32_t tmp;
2614 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
2615 goto out_not_found;
2617 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
2618 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
2619 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
2620 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
2623 /* Now parse and find the part number. */
2624 for (i = 0; i < 256; ) {
2625 unsigned char val = vpd_data[i];
2626 int block_end;
2628 if (val == 0x82 || val == 0x91) {
2629 i = (i + 3 +
2630 (vpd_data[i + 1] +
2631 (vpd_data[i + 2] << 8)));
2632 continue;
2635 if (val != 0x90)
2636 goto out_not_found;
2638 block_end = (i + 3 +
2639 (vpd_data[i + 1] +
2640 (vpd_data[i + 2] << 8)));
2641 i += 3;
2642 while (i < block_end) {
2643 if (vpd_data[i + 0] == 'P' &&
2644 vpd_data[i + 1] == 'N') {
2645 int partno_len = vpd_data[i + 2];
2647 if (partno_len > 24)
2648 goto out_not_found;
2650 memcpy(tp->board_part_number,
2651 &vpd_data[i + 3],
2652 partno_len);
2654 /* Success. */
2655 return;
2659 /* Part number not found. */
2660 goto out_not_found;
2663 out_not_found:
2664 memcpy(tp->board_part_number, "none", sizeof("none"));
2666 #else
2667 #define tg3_read_partno(TP) ((TP)->board_part_number[0] = '\0')
2668 #endif
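/* For orientation when reading tg3_read_partno() above: the loop walks
 * standard PCI VPD resource records.  The tag values (0x82 identifier
 * string, 0x90 read-only VPD-R, 0x91 read-write VPD-W) and the "PN"
 * keyword are assumptions from the usual VPD conventions, not defined in
 * this file.  A minimal sketch of the length decode the loop relies on:
 */
#if 0
static int vpd_large_resource_len(const unsigned char *vpd, int i)
{
	/* 16-bit little-endian length following a large-resource tag byte */
	return vpd[i + 1] + (vpd[i + 2] << 8);
}
#endif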
2670 static int tg3_get_invariants(struct tg3 *tp)
2672 uint32_t misc_ctrl_reg;
2673 uint32_t pci_state_reg, grc_misc_cfg;
2674 uint16_t pci_cmd;
2675 uint8_t pci_latency;
2676 uint32_t val ;
2677 int err;
2679 /* Read the subsystem vendor and device ids */
2680 pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_VENDOR_ID, &tp->subsystem_vendor);
2681 pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_ID, &tp->subsystem_device);
2683 /* The sun_5704 code needs infrastructure etherboot does not have;
2684 * ignore it for now.
2687 /* If we have an AMD 762 or Intel ICH/ICH0 chipset, write
2688 * reordering to the mailbox registers done by the host
2689 * controller can cause major troubles. We read back from
2690 * every mailbox register write to force the writes to be
2691 * posted to the chip in order.
2693 * TG3_FLAG_MBOX_WRITE_REORDER has been forced on.
2696 /* Force memory write invalidate off. If we leave it on,
2697 * then on 5700_BX chips we have to enable a workaround.
2698 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
2699 * to match the cacheline size. The Broadcom driver has this
2700 * workaround but turns MWI off all the time so never uses
2701 * it. This seems to suggest that the workaround is insufficient.
2703 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
2704 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
2705 /* Also, force SERR#/PERR# in PCI command. */
2706 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
2707 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
2709 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
2710 * has the register indirect write enable bit set before
2711 * we try to access any of the MMIO registers. It is also
2712 * critical that the PCI-X hw workaround situation is decided
2713 * before that as well.
2715 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, &misc_ctrl_reg);
2717 tp->pci_chip_rev_id = (misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT);
2719 /* Initialize misc host control in PCI block. */
2720 tp->misc_host_ctrl |= (misc_ctrl_reg &
2721 MISC_HOST_CTRL_CHIPREV);
2722 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
2723 tp->misc_host_ctrl);
2725 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, &pci_latency);
2726 if (pci_latency < 64) {
2727 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 64);
2730 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg);
2732 /* If this is a 5700 BX chipset, and we are in PCI-X
2733 * mode, enable register write workaround.
2735 * The workaround is to use indirect register accesses
2736 * for all chip writes not to mailbox registers.
2738 * In etherboot, to simplify things, we just always use this workaround.
2740 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
2741 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
2743 /* Back to back register writes can cause problems on the 5701,
2744 * the workaround is to read back all reg writes except those to
2745 * mailbox regs.
2746 * In etherboot we always use indirect register accesses so
2747 * we don't see this.
2750 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
2751 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
2752 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
2753 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
2755 /* Chip-specific fixup from Broadcom driver */
2756 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
2757 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
2758 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
2759 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
2762 /* determine if it is PCIE system */
2763 // Alf : I have no idea what this is about...
2764 // But it's definitely useful
2765 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
2766 val = tr32(TG3PCI_MSI_CAP_ID) ;
2767 if (((val >> 8) & 0xff) == T3_PCIE_CAPABILITY_ID_REG) {
2768 val = tr32(T3_PCIE_CAPABILITY_ID_REG) ;
2769 if ((val & 0xff) == T3_PCIE_CAPABILITY_ID) {
2770 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS ;
2775 /* Force the chip into D0. */
2776 tg3_set_power_state_0(tp);
2778 /* Etherboot does not ask the tg3 to do checksums */
2779 /* Etherboot does not ask the tg3 to do jumbo frames */
2780 /* Etherboot does not ask the tg3 to use WakeOnLan. */
2782 /* A few boards don't want Ethernet@WireSpeed phy feature */
2783 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
2784 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
2785 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2786 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
2787 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1))) {
2788 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
2791 /* Avoid tagged irq status; etherboot does not use irqs */
2793 /* Only 5701 and later support tagged irq status mode.
2794 * Also, 5788 chips cannot use tagged irq status.
2796 * However, since etherboot does not use irqs, avoid tagged irq
2797 * status because the interrupt condition is more difficult to
2798 * fully clear in that mode.
2801 /* Since some 5700_AX && 5700_BX have problems with 32BYTE
2802 * coalesce_mode, and the rest work fine with anything set.
2803 * Don't enable HOST_CC_MODE_32BYTE in etherboot.
2806 /* Initialize MAC MI mode, polling disabled. */
2807 tw32_carefully(MAC_MI_MODE, tp->mi_mode);
2809 /* Initialize data/descriptor byte/word swapping. */
2810 tw32(GRC_MODE, tp->grc_mode);
2812 tg3_switch_clocks(tp);
2814 /* Clear this out for sanity. */
2815 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
2817 /* Etherboot does not need to check whether the PCIX_TARGET_HWBUG
2818 * workaround is needed; it always uses it.
2821 udelay(50);
2822 tg3_nvram_init(tp);
2824 /* The TX descriptors will reside in main memory.
2827 /* See which board we are using.
2829 grc_misc_cfg = tr32(GRC_MISC_CFG);
2830 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
2832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
2833 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
2834 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
2835 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
2838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
2839 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
2840 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
2841 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
2843 #define PCI_DEVICE_ID_TIGON3_5901 0x170d
2844 #define PCI_DEVICE_ID_TIGON3_5901_2 0x170e
2846 /* these are limited to 10/100 only */
2847 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) &&
2848 ((grc_misc_cfg == 0x8000) || (grc_misc_cfg == 0x4000))) ||
2849 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2850 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM) &&
2851 ((tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901) ||
2852 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2)))) {
2853 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
2856 err = tg3_phy_probe(tp);
2857 if (err) {
2858 printf("phy probe failed, err %d\n", err);
2861 tg3_read_partno(tp);
2864 /* 5700 BX chips need to have their TX producer index mailboxes
2865 * written twice to workaround a bug.
2866 * In etherboot we do this unconditionally to simplify things.
2869 /* 5700 chips can get confused if TX buffers straddle the
2870 * 4GB address boundary in some cases.
2872 * In etherboot we can ignore the problem as etherboot lives below 4GB.
2875 /* In etherboot wake-on-lan is unconditionally disabled */
2876 return err;
2879 static int tg3_get_device_address(struct tg3 *tp)
2881 struct nic *nic = tp->nic;
2882 uint32_t hi, lo, mac_offset;
2884 if (PCI_FUNC(tp->pdev->devfn) == 0)
2885 mac_offset = 0x7c;
2886 else
2887 mac_offset = 0xcc;
2889 /* First try to get it from MAC address mailbox. */
2890 tg3_read_mem(NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
2891 if ((hi >> 16) == 0x484b) {
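/* 0x484b is ASCII "HK", presumably a bootcode signature marking the
 * mailbox contents as a valid MAC address.
 */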
2892 nic->node_addr[0] = (hi >> 8) & 0xff;
2893 nic->node_addr[1] = (hi >> 0) & 0xff;
2895 tg3_read_mem(NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
2896 nic->node_addr[2] = (lo >> 24) & 0xff;
2897 nic->node_addr[3] = (lo >> 16) & 0xff;
2898 nic->node_addr[4] = (lo >> 8) & 0xff;
2899 nic->node_addr[5] = (lo >> 0) & 0xff;
2901 /* Next, try NVRAM. */
2902 else if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
2903 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
2904 nic->node_addr[0] = ((hi >> 16) & 0xff);
2905 nic->node_addr[1] = ((hi >> 24) & 0xff);
2906 nic->node_addr[2] = ((lo >> 0) & 0xff);
2907 nic->node_addr[3] = ((lo >> 8) & 0xff);
2908 nic->node_addr[4] = ((lo >> 16) & 0xff);
2909 nic->node_addr[5] = ((lo >> 24) & 0xff);
2911 /* Finally just fetch it out of the MAC control regs. */
2912 else {
2913 hi = tr32(MAC_ADDR_0_HIGH);
2914 lo = tr32(MAC_ADDR_0_LOW);
2916 nic->node_addr[5] = lo & 0xff;
2917 nic->node_addr[4] = (lo >> 8) & 0xff;
2918 nic->node_addr[3] = (lo >> 16) & 0xff;
2919 nic->node_addr[2] = (lo >> 24) & 0xff;
2920 nic->node_addr[1] = hi & 0xff;
2921 nic->node_addr[0] = (hi >> 8) & 0xff;
2924 return 0;
2928 static int tg3_setup_dma(struct tg3 *tp)
2930 tw32(TG3PCI_CLOCK_CTRL, 0);
2932 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) == 0) {
2933 tp->dma_rwctrl =
2934 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
2935 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
2936 (0x7 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
2937 (0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
2938 (0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
2939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2940 tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA << DMA_RWCTRL_MIN_DMA_SHIFT);
2942 } else {
2943 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
2944 tp->dma_rwctrl =
2945 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
2946 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
2947 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
2948 (0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
2949 (0x00 << DMA_RWCTRL_MIN_DMA_SHIFT);
2950 else
2951 tp->dma_rwctrl =
2952 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
2953 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
2954 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
2955 (0x3 << DMA_RWCTRL_READ_WATER_SHIFT) |
2956 (0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
2958 /* Wheee, some more chip bugs... */
2959 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
2960 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)) {
2961 uint32_t ccval = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
2963 if ((ccval == 0x6) || (ccval == 0x7)) {
2964 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
2969 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
2970 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)) {
2971 tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA << DMA_RWCTRL_MIN_DMA_SHIFT);
2975 Alf : Tried that, but it does not work. Should be this way though :-(
2976 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
2977 tp->dma_rwctrl |= 0x001f0000;
2980 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
2982 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
2984 return 0;
2987 static void tg3_init_link_config(struct tg3 *tp)
2989 tp->link_config.advertising =
2990 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
2991 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
2992 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
2993 ADVERTISED_Autoneg | ADVERTISED_MII);
2994 tp->carrier_ok = 0;
2995 tp->link_config.active_speed = SPEED_INVALID;
2996 tp->link_config.active_duplex = DUPLEX_INVALID;
3000 #if SUPPORT_PHY_STR
3001 static const char * tg3_phy_string(struct tg3 *tp)
3003 switch (tp->phy_id & PHY_ID_MASK) {
3004 case PHY_ID_BCM5400: return "5400";
3005 case PHY_ID_BCM5401: return "5401";
3006 case PHY_ID_BCM5411: return "5411";
3007 case PHY_ID_BCM5701: return "5701";
3008 case PHY_ID_BCM5703: return "5703";
3009 case PHY_ID_BCM5704: return "5704";
3010 case PHY_ID_BCM5705: return "5705";
3011 case PHY_ID_BCM5750: return "5750";
3012 case PHY_ID_BCM5751: return "5751";
3013 case PHY_ID_BCM8002: return "8002/serdes";
3014 case PHY_ID_SERDES: return "serdes";
3015 default: return "unknown";
3018 #else
3019 #define tg3_phy_string(TP) "?"
3020 #endif
3023 static void tg3_poll_link(struct tg3 *tp)
3025 uint32_t mac_stat;
3027 mac_stat = tr32(MAC_STATUS);
3028 if (tp->phy_id == PHY_ID_SERDES) {
3029 if (tp->carrier_ok?
3030 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED):
3031 (mac_stat & MAC_STATUS_PCS_SYNCED)) {
3032 tw32_carefully(MAC_MODE, tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK);
3033 tw32_carefully(MAC_MODE, tp->mac_mode);
3035 tg3_setup_phy(tp);
3038 else {
3039 if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) {
3040 tg3_setup_phy(tp);
3045 /**************************************************************************
3046 POLL - Wait for a frame
3047 ***************************************************************************/
3048 static void tg3_ack_irqs(struct tg3 *tp)
3050 if (tp->hw_status->status & SD_STATUS_UPDATED) {
3052 * writing any value to intr-mbox-0 clears PCI INTA# and
3053 * chip-internal interrupt pending events.
3054 * writing non-zero to intr-mbox-0 additionally tells the
3055 * NIC to stop sending us irqs, engaging "in-intr-handler"
3056 * event coalescing.
3058 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3059 0x00000001);
3061 * Flush PCI write. This also guarantees that our
3062 * status block has been flushed to host memory.
3064 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3065 tp->hw_status->status &= ~SD_STATUS_UPDATED;
3069 static int tg3_poll(struct nic *nic, int retrieve)
3071 /* return true if there's an ethernet packet ready to read */
3072 /* nic->packet should contain data on return */
3073 /* nic->packetlen should contain length of data */
3075 struct tg3 *tp = &tg3;
3076 int result;
3078 result = 0;
3080 if ( (tp->hw_status->idx[0].rx_producer != tp->rx_rcb_ptr) && !retrieve )
3081 return 1;
3083 tg3_ack_irqs(tp);
3085 if (tp->hw_status->idx[0].rx_producer != tp->rx_rcb_ptr) {
3086 struct tg3_rx_buffer_desc *desc;
3087 unsigned int len;
3088 desc = &tp->rx_rcb[tp->rx_rcb_ptr];
3089 if ((desc->opaque & RXD_OPAQUE_RING_MASK) == RXD_OPAQUE_RING_STD) {
3090 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3092 nic->packetlen = len;
3093 memcpy(nic->packet, bus_to_virt(desc->addr_lo), len);
3094 result = 1;
3096 tp->rx_rcb_ptr = (tp->rx_rcb_ptr + 1) % TG3_RX_RCB_RING_SIZE;
3098 /* ACK the status ring */
3099 tw32_mailbox2(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, tp->rx_rcb_ptr);
3101 /* Refill RX ring. */
3102 if (result) {
3103 tp->rx_std_ptr = (tp->rx_std_ptr + 1) % TG3_RX_RING_SIZE;
3104 tw32_mailbox2(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, tp->rx_std_ptr);
3107 tg3_poll_link(tp);
3108 return result;
3111 /**************************************************************************
3112 TRANSMIT - Transmit a frame
3113 ***************************************************************************/
3114 #if 0
3115 static void tg3_set_txd(struct tg3 *tp, int entry,
3116 dma_addr_t mapping, int len, uint32_t flags,
3117 uint32_t mss_and_is_end)
3119 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3120 int is_end = (mss_and_is_end & 0x1);
3121 if (is_end) {
3122 flags |= TXD_FLAG_END;
3125 txd->addr_hi = 0;
3126 txd->addr_lo = mapping & 0xffffffff;
3127 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3128 txd->vlan_tag = 0 << TXD_VLAN_TAG_SHIFT;
3130 #endif
3132 static void tg3_transmit(struct nic *nic, const char *dst_addr,
3133 unsigned int type, unsigned int size, const char *packet)
3135 static int frame_idx;
3136 struct eth_frame *frame;
3138 /* send the packet to destination */
3139 struct tg3_tx_buffer_desc *txd;
3140 struct tg3 *tp;
3141 uint32_t entry;
3142 int i;
3144 /* Wait until there is a free packet frame */
3145 tp = &tg3;
3146 i = 0;
3147 entry = tp->tx_prod;
3148 while((tp->hw_status->idx[0].tx_consumer != entry) &&
3149 (tp->hw_status->idx[0].tx_consumer != PREV_TX(entry))) {
3150 mdelay(10); /* give the nic a chance */
3151 if (++i > 500) { /* timeout 5s for transmit */
3152 printf("transmit timed out\n");
3153 tg3_halt(tp);
3154 tg3_setup_hw(tp);
3155 return;
3158 if (i != 0) {
3159 printf("#");
3162 /* Copy the packet to our local buffer */
3163 frame = &tg3_bss.tx_frame[frame_idx];
3164 memcpy(frame->dst_addr, dst_addr, ETH_ALEN);
3165 memcpy(frame->src_addr, nic->node_addr, ETH_ALEN);
3166 frame->type = htons(type);
3167 memset(frame->data, 0, sizeof(frame->data));
3168 memcpy(frame->data, packet, size);
3170 /* Setup the ring buffer entry to transmit */
3171 txd = &tp->tx_ring[entry];
3172 txd->addr_hi = 0; /* Etherboot runs under 4GB */
3173 txd->addr_lo = virt_to_bus(frame);
3174 txd->len_flags = ((size + ETH_HLEN) << TXD_LEN_SHIFT) | TXD_FLAG_END;
3175 txd->vlan_tag = 0 << TXD_VLAN_TAG_SHIFT;
3177 /* Advance to the next entry */
3178 entry = NEXT_TX(entry);
3179 frame_idx ^= 1;
3181 /* Packets are ready, update Tx producer idx local and on card */
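/* The producer index is written twice (mailbox then mailbox2); this is
 * the unconditional 5700 BX "write the TX producer mailbox twice"
 * workaround noted in tg3_get_invariants().
 */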
3182 tw32_mailbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3183 tw32_mailbox2((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3184 tp->tx_prod = entry;
3187 /**************************************************************************
3188 DISABLE - Turn off ethernet interface
3189 ***************************************************************************/
3190 static void tg3_disable ( struct nic *nic __unused ) {
3191 struct tg3 *tp = &tg3;
3192 /* put the card in its initial state */
3193 /* This function serves 3 purposes.
3194 * This disables DMA and interrupts so we don't receive
3195 * unexpected packets or interrupts from the card after
3196 * etherboot has finished.
3197 * This frees resources so etherboot may use
3198 * this driver on another interface.
3199 * This allows etherboot to reinitialize the interface
3200 * if something goes wrong.
3202 tg3_halt(tp);
3203 tp->tg3_flags &= ~(TG3_FLAG_INIT_COMPLETE|TG3_FLAG_GOT_SERDES_FLOWCTL);
3204 tp->carrier_ok = 0;
3205 iounmap((void *)tp->regs);
3208 /**************************************************************************
3209 IRQ - Enable, Disable, or Force interrupts
3210 ***************************************************************************/
3211 static void tg3_irq(struct nic *nic __unused, irq_action_t action __unused)
3213 switch ( action ) {
3214 case DISABLE :
3215 break;
3216 case ENABLE :
3217 break;
3218 case FORCE :
3219 break;
3223 static struct nic_operations tg3_operations = {
3224 .connect = dummy_connect,
3225 .poll = tg3_poll,
3226 .transmit = tg3_transmit,
3227 .irq = tg3_irq,
3231 /**************************************************************************
3232 PROBE - Look for an adapter, this routine's visible to the outside
3233 You should omit the last argument struct pci_device * for a non-PCI NIC
3234 ***************************************************************************/
3235 static int tg3_probe ( struct nic *nic, struct pci_device *pdev ) {
3237 struct tg3 *tp = &tg3;
3238 unsigned long tg3reg_base, tg3reg_len;
3239 int i, err, pm_cap;
3241 memset(tp, 0, sizeof(*tp));
3243 adjust_pci_device(pdev);
3245 nic->irqno = 0;
3246 nic->ioaddr = pdev->ioaddr;
3248 /* Find power-management capability. */
3249 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
3250 if (pm_cap == 0) {
3251 printf("Cannot find PowerManagement capability, aborting.\n");
3252 return 0;
3254 tg3reg_base = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
3255 if (tg3reg_base == -1UL) {
3256 printf("Unuseable bar\n");
3257 return 0;
3259 tg3reg_len = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);
3261 tp->pdev = pdev;
3262 tp->nic = nic;
3263 tp->pm_cap = pm_cap;
3264 tp->rx_mode = 0;
3265 tp->tx_mode = 0;
3266 tp->mi_mode = MAC_MI_MODE_BASE;
3267 tp->tg3_flags = 0 & ~TG3_FLAG_INIT_COMPLETE;
3269 /* The word/byte swap controls here control register access byte
3270 * swapping. DMA data byte swapping is controlled in the GRC_MODE
3271 * setting below.
3273 tp->misc_host_ctrl =
3274 MISC_HOST_CTRL_MASK_PCI_INT |
3275 MISC_HOST_CTRL_WORD_SWAP |
3276 MISC_HOST_CTRL_INDIR_ACCESS |
3277 MISC_HOST_CTRL_PCISTATE_RW;
3279 /* The NONFRM (non-frame) byte/word swap controls take effect
3280 * on descriptor entries, anything which isn't packet data.
3282 * The StrongARM chips on the board (one for tx, one for rx)
3283 * are running in big-endian mode.
3285 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
3286 GRC_MODE_WSWAP_NONFRM_DATA);
3287 #if __BYTE_ORDER == __BIG_ENDIAN
3288 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
3289 #endif
3290 tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
3291 if (tp->regs == 0UL) {
3292 printf("Cannot map device registers, aborting\n");
3293 return 0;
3296 tg3_init_link_config(tp);
3298 err = tg3_get_invariants(tp);
3299 if (err) {
3300 printf("Problem fetching invariants of chip, aborting.\n");
3301 goto err_out_iounmap;
3304 err = tg3_get_device_address(tp);
3305 if (err) {
3306 printf("Could not obtain valid ethernet address, aborting.\n");
3307 goto err_out_iounmap;
3310 DBG ( "Ethernet addr: %s\n", eth_ntoa ( nic->node_addr ) );
3312 tg3_setup_dma(tp);
3314 /* Now that we have fully setup the chip, save away a snapshot
3315 * of the PCI config space. We need to restore this after
3316 * GRC_MISC_CFG core clock resets and some resume events.
3318 pci_save_state(tp->pdev, tp->pci_cfg_state);
3320 printf("Tigon3 [partno(%s) rev %hx PHY(%s)] (PCI%s:%s:%s)\n",
3321 tp->board_part_number,
3322 tp->pci_chip_rev_id,
3323 tg3_phy_string(tp),
3324 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
3325 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
3326 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
3327 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
3328 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"));
3331 err = tg3_setup_hw(tp);
3332 if (err) {
3333 goto err_out_disable;
3335 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
3337 /* Wait for a reasonable time for the link to come up */
3338 tg3_poll_link(tp);
3339 for(i = 0; !tp->carrier_ok && (i < VALID_LINK_TIMEOUT*100); i++) {
3340 mdelay(1);
3341 tg3_poll_link(tp);
3343 if (!tp->carrier_ok){
3344 printf("Valid link not established\n");
3345 goto err_out_disable;
3348 nic->nic_op = &tg3_operations;
3349 return 1;
3351 err_out_iounmap:
3352 iounmap((void *)tp->regs);
3353 return 0;
3354 err_out_disable:
3355 tg3_disable(nic);
3356 return 0;
3360 static struct pci_device_id tg3_nics[] = {
3361 PCI_ROM(0x14e4, 0x1644, "tg3-5700", "Broadcom Tigon 3 5700"),
3362 PCI_ROM(0x14e4, 0x1645, "tg3-5701", "Broadcom Tigon 3 5701"),
3363 PCI_ROM(0x14e4, 0x1646, "tg3-5702", "Broadcom Tigon 3 5702"),
3364 PCI_ROM(0x14e4, 0x1647, "tg3-5703", "Broadcom Tigon 3 5703"),
3365 PCI_ROM(0x14e4, 0x1648, "tg3-5704", "Broadcom Tigon 3 5704"),
3366 PCI_ROM(0x14e4, 0x164d, "tg3-5702FE", "Broadcom Tigon 3 5702FE"),
3367 PCI_ROM(0x14e4, 0x1653, "tg3-5705", "Broadcom Tigon 3 5705"),
3368 PCI_ROM(0x14e4, 0x1654, "tg3-5705_2", "Broadcom Tigon 3 5705_2"),
3369 PCI_ROM(0x14e4, 0x165d, "tg3-5705M", "Broadcom Tigon 3 5705M"),
3370 PCI_ROM(0x14e4, 0x165e, "tg3-5705M_2", "Broadcom Tigon 3 5705M_2"),
3371 PCI_ROM(0x14e4, 0x1677, "tg3-5751", "Broadcom Tigon 3 5751"),
3372 PCI_ROM(0x14e4, 0x1696, "tg3-5782", "Broadcom Tigon 3 5782"),
3373 PCI_ROM(0x14e4, 0x169c, "tg3-5788", "Broadcom Tigon 3 5788"),
3374 PCI_ROM(0x14e4, 0x16a6, "tg3-5702X", "Broadcom Tigon 3 5702X"),
3375 PCI_ROM(0x14e4, 0x16a7, "tg3-5703X", "Broadcom Tigon 3 5703X"),
3376 PCI_ROM(0x14e4, 0x16a8, "tg3-5704S", "Broadcom Tigon 3 5704S"),
3377 PCI_ROM(0x14e4, 0x16c6, "tg3-5702A3", "Broadcom Tigon 3 5702A3"),
3378 PCI_ROM(0x14e4, 0x16c7, "tg3-5703A3", "Broadcom Tigon 3 5703A3"),
3379 PCI_ROM(0x14e4, 0x170d, "tg3-5901", "Broadcom Tigon 3 5901"),
3380 PCI_ROM(0x14e4, 0x170e, "tg3-5901_2", "Broadcom Tigon 3 5901_2"),
3381 PCI_ROM(0x1148, 0x4400, "tg3-9DXX", "Syskonnect 9DXX"),
3382 PCI_ROM(0x1148, 0x4500, "tg3-9MXX", "Syskonnect 9MXX"),
3383 PCI_ROM(0x173b, 0x03e8, "tg3-ac1000", "Altima AC1000"),
3384 PCI_ROM(0x173b, 0x03e9, "tg3-ac1001", "Altima AC1001"),
3385 PCI_ROM(0x173b, 0x03ea, "tg3-ac9100", "Altima AC9100"),
3386 PCI_ROM(0x173b, 0x03eb, "tg3-ac1003", "Altima AC1003"),
3389 PCI_DRIVER ( tg3_driver, tg3_nics, PCI_NO_CLASS );
3391 DRIVER ( "TG3", nic_driver, pci_driver, tg3_driver,
3392 tg3_probe, tg3_disable );
3395 * Local variables:
3396 * c-basic-offset: 8
3397 * c-indent-level: 8
3398 * tab-width: 8
3399 * End: