gpxe.git: src/drivers/net/sky2.c
1 /*
2 * gPXE driver for Marvell Yukon 2 chipset. Derived from Linux sky2 driver
3 * (v1.22), which was based on earlier sk98lin and skge drivers.
5 * This driver intentionally does not support all the features
6 * of the original driver such as link fail-over and link management because
7 * those should be done at higher levels.
9 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
11 * Modified for gPXE, April 2009 by Joshua Oreman
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 FILE_LICENCE ( GPL2_ONLY );
29 #include <stdint.h>
30 #include <errno.h>
31 #include <stdio.h>
32 #include <unistd.h>
33 #include <gpxe/ethernet.h>
34 #include <gpxe/if_ether.h>
35 #include <gpxe/iobuf.h>
36 #include <gpxe/malloc.h>
37 #include <gpxe/pci.h>
38 #include <byteswap.h>
39 #include <mii.h>
41 #include "sky2.h"
43 #define DRV_NAME "sky2"
44 #define DRV_VERSION "1.22"
45 #define PFX DRV_NAME " "
48 * The Yukon II chipset takes 64 bit command blocks (called list elements)
49 * that are organized into three (receive, transmit, status) different rings
50 * similar to Tigon3.
52 * Each ring start must be aligned to a 4k boundary. You will get mysterious
53 * "invalid LE" errors if they're not.
55 * The card silently forces each ring size to be at least 128. If you
56 * act as though one of them is smaller (by setting the below
57 * #defines) you'll get bad bugs.
60 #define RX_LE_SIZE 128
61 #define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
62 #define RX_RING_ALIGN 4096
63 #define RX_PENDING (RX_LE_SIZE/6 - 2)
65 #define TX_RING_SIZE 128
66 #define TX_PENDING (TX_RING_SIZE - 1)
67 #define TX_RING_ALIGN 4096
68 #define MAX_SKB_TX_LE 4
70 #define STATUS_RING_SIZE 512 /* 2 ports * (TX + RX) */
71 #define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
72 #define STATUS_RING_ALIGN 4096
73 #define PHY_RETRIES 1000
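/* Note: both PHY polling loops below wait 10 us per retry, so with
 * PHY_RETRIES == 1000 a stuck PHY access gives up after roughly 10 ms. */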
75 #define SKY2_EEPROM_MAGIC 0x9955aabb
78 #define RING_NEXT(x,s) (((x)+1) & ((s)-1))
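/* RING_NEXT() relies on the ring size being a power of two: e.g.
 * RING_NEXT(127, 128) == (127 + 1) & 127 == 0, wrapping back to the
 * start. TX_RING_SIZE, RX_LE_SIZE and STATUS_RING_SIZE all qualify. */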
80 static struct pci_device_id sky2_id_table[] = {
81 PCI_ROM(0x1148, 0x9000, "sk9sxx", "Syskonnect SK-9Sxx", 0),
82 PCI_ROM(0x1148, 0x9e00, "sk9exx", "Syskonnect SK-9Exx", 0),
83 PCI_ROM(0x1186, 0x4b00, "dge560t", "D-Link DGE-560T", 0),
84 PCI_ROM(0x1186, 0x4001, "dge550sx", "D-Link DGE-550SX", 0),
85 PCI_ROM(0x1186, 0x4b02, "dge560sx", "D-Link DGE-560SX", 0),
86 PCI_ROM(0x1186, 0x4b03, "dge550t", "D-Link DGE-550T", 0),
87 PCI_ROM(0x11ab, 0x4340, "m88e8021", "Marvell 88E8021", 0),
88 PCI_ROM(0x11ab, 0x4341, "m88e8022", "Marvell 88E8022", 0),
89 PCI_ROM(0x11ab, 0x4342, "m88e8061", "Marvell 88E8061", 0),
90 PCI_ROM(0x11ab, 0x4343, "m88e8062", "Marvell 88E8062", 0),
91 PCI_ROM(0x11ab, 0x4344, "m88e8021b", "Marvell 88E8021", 0),
92 PCI_ROM(0x11ab, 0x4345, "m88e8022b", "Marvell 88E8022", 0),
93 PCI_ROM(0x11ab, 0x4346, "m88e8061b", "Marvell 88E8061", 0),
94 PCI_ROM(0x11ab, 0x4347, "m88e8062b", "Marvell 88E8062", 0),
95 PCI_ROM(0x11ab, 0x4350, "m88e8035", "Marvell 88E8035", 0),
96 PCI_ROM(0x11ab, 0x4351, "m88e8036", "Marvell 88E8036", 0),
97 PCI_ROM(0x11ab, 0x4352, "m88e8038", "Marvell 88E8038", 0),
98 PCI_ROM(0x11ab, 0x4353, "m88e8039", "Marvell 88E8039", 0),
99 PCI_ROM(0x11ab, 0x4354, "m88e8040", "Marvell 88E8040", 0),
100 PCI_ROM(0x11ab, 0x4355, "m88e8040t", "Marvell 88E8040T", 0),
101 PCI_ROM(0x11ab, 0x4356, "m88ec033", "Marvell 88EC033", 0),
102 PCI_ROM(0x11ab, 0x4357, "m88e8042", "Marvell 88E8042", 0),
103 PCI_ROM(0x11ab, 0x435a, "m88e8048", "Marvell 88E8048", 0),
104 PCI_ROM(0x11ab, 0x4360, "m88e8052", "Marvell 88E8052", 0),
105 PCI_ROM(0x11ab, 0x4361, "m88e8050", "Marvell 88E8050", 0),
106 PCI_ROM(0x11ab, 0x4362, "m88e8053", "Marvell 88E8053", 0),
107 PCI_ROM(0x11ab, 0x4363, "m88e8055", "Marvell 88E8055", 0),
108 PCI_ROM(0x11ab, 0x4364, "m88e8056", "Marvell 88E8056", 0),
109 PCI_ROM(0x11ab, 0x4365, "m88e8070", "Marvell 88E8070", 0),
110 PCI_ROM(0x11ab, 0x4366, "m88ec036", "Marvell 88EC036", 0),
111 PCI_ROM(0x11ab, 0x4367, "m88ec032", "Marvell 88EC032", 0),
112 PCI_ROM(0x11ab, 0x4368, "m88ec034", "Marvell 88EC034", 0),
113 PCI_ROM(0x11ab, 0x4369, "m88ec042", "Marvell 88EC042", 0),
114 PCI_ROM(0x11ab, 0x436a, "m88e8058", "Marvell 88E8058", 0),
115 PCI_ROM(0x11ab, 0x436b, "m88e8071", "Marvell 88E8071", 0),
116 PCI_ROM(0x11ab, 0x436c, "m88e8072", "Marvell 88E8072", 0),
117 PCI_ROM(0x11ab, 0x436d, "m88e8055b", "Marvell 88E8055", 0),
118 PCI_ROM(0x11ab, 0x4370, "m88e8075", "Marvell 88E8075", 0),
119 PCI_ROM(0x11ab, 0x4380, "m88e8057", "Marvell 88E8057", 0)
122 /* Avoid conditionals by using array */
123 static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
124 static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
125 static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
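/* Example: the receive queue CSR for port 1 is Q_ADDR(rxqaddr[1], Q_CSR),
 * i.e. Q_ADDR(Q_R2, Q_CSR), so per-port register access needs no if/else. */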
127 static void sky2_set_multicast(struct net_device *dev);
129 /* Access to PHY via serial interconnect */
130 static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
132 int i;
134 gma_write16(hw, port, GM_SMI_DATA, val);
135 gma_write16(hw, port, GM_SMI_CTRL,
136 GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
138 for (i = 0; i < PHY_RETRIES; i++) {
139 u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
140 if (ctrl == 0xffff)
141 goto io_error;
143 if (!(ctrl & GM_SMI_CT_BUSY))
144 return 0;
146 udelay(10);
149 DBG(PFX "%s: phy write timeout\n", hw->dev[port]->name);
150 return -ETIMEDOUT;
152 io_error:
153 DBG(PFX "%s: phy I/O error\n", hw->dev[port]->name);
154 return -EIO;
157 static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
159 int i;
161 gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
162 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
164 for (i = 0; i < PHY_RETRIES; i++) {
165 u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
166 if (ctrl == 0xffff)
167 goto io_error;
169 if (ctrl & GM_SMI_CT_RD_VAL) {
170 *val = gma_read16(hw, port, GM_SMI_DATA);
171 return 0;
174 udelay(10);
177 DBG(PFX "%s: phy read timeout\n", hw->dev[port]->name);
178 return -ETIMEDOUT;
179 io_error:
180 DBG(PFX "%s: phy I/O error\n", hw->dev[port]->name);
181 return -EIO;
184 static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
186 u16 v = 0;
187 __gm_phy_read(hw, port, reg, &v);
188 return v;
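/* Note: if the read fails, __gm_phy_read() leaves *val untouched, so
 * gm_phy_read() silently returns 0 on PHY timeouts and I/O errors. */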
192 static void sky2_power_on(struct sky2_hw *hw)
194 /* switch power to VCC (WA for VAUX problem) */
195 sky2_write8(hw, B0_POWER_CTRL,
196 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
198 /* disable Core Clock Division */
199 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
201 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
202 /* enable bits are inverted */
203 sky2_write8(hw, B2_Y2_CLK_GATE,
204 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
205 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
206 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
207 else
208 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
210 if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
211 u32 reg;
213 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
215 reg = sky2_pci_read32(hw, PCI_DEV_REG4);
216 /* set all bits to 0 except bits 15..12 and 8 */
217 reg &= P_ASPM_CONTROL_MSK;
218 sky2_pci_write32(hw, PCI_DEV_REG4, reg);
220 reg = sky2_pci_read32(hw, PCI_DEV_REG5);
221 /* set all bits to 0 except bits 28 & 27 */
222 reg &= P_CTL_TIM_VMAIN_AV_MSK;
223 sky2_pci_write32(hw, PCI_DEV_REG5, reg);
225 sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
227 /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
228 reg = sky2_read32(hw, B2_GP_IO);
229 reg |= GLB_GPIO_STAT_RACE_DIS;
230 sky2_write32(hw, B2_GP_IO, reg);
232 sky2_read32(hw, B2_GP_IO);
236 static void sky2_power_aux(struct sky2_hw *hw)
238 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
239 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
240 else
241 /* enable bits are inverted */
242 sky2_write8(hw, B2_Y2_CLK_GATE,
243 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
244 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
245 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
247 /* switch power to VAUX */
248 if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
249 sky2_write8(hw, B0_POWER_CTRL,
250 (PC_VAUX_ENA | PC_VCC_ENA |
251 PC_VAUX_ON | PC_VCC_OFF));
254 static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
256 u16 reg;
258 /* disable all GMAC IRQ's */
259 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
261 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
262 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
263 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
264 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
266 reg = gma_read16(hw, port, GM_RX_CTRL);
267 reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
268 gma_write16(hw, port, GM_RX_CTRL, reg);
271 /* flow control to advertise bits */
272 static const u16 copper_fc_adv[] = {
273 [FC_NONE] = 0,
274 [FC_TX] = PHY_M_AN_ASP,
275 [FC_RX] = PHY_M_AN_PC,
276 [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP,
279 /* flow control to advertise bits when using 1000BaseX */
280 static const u16 fiber_fc_adv[] = {
281 [FC_NONE] = PHY_M_P_NO_PAUSE_X,
282 [FC_TX] = PHY_M_P_ASYM_MD_X,
283 [FC_RX] = PHY_M_P_SYM_MD_X,
284 [FC_BOTH] = PHY_M_P_BOTH_MD_X,
287 /* flow control to GMA disable bits */
288 static const u16 gm_fc_disable[] = {
289 [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
290 [FC_TX] = GM_GPCR_FC_RX_DIS,
291 [FC_RX] = GM_GPCR_FC_TX_DIS,
292 [FC_BOTH] = 0,
296 static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
298 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
299 u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
301 if (sky2->autoneg == AUTONEG_ENABLE &&
302 !(hw->flags & SKY2_HW_NEWER_PHY)) {
303 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
305 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
306 PHY_M_EC_MAC_S_MSK);
307 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
309 /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
310 if (hw->chip_id == CHIP_ID_YUKON_EC)
311 /* set downshift counter to 3x and enable downshift */
312 ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
313 else
314 /* set master & slave downshift counter to 1x */
315 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
317 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
320 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
321 if (sky2_is_copper(hw)) {
322 if (!(hw->flags & SKY2_HW_GIGABIT)) {
323 /* enable automatic crossover */
324 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
326 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
327 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
328 u16 spec;
330 /* Enable Class A driver for FE+ A0 */
331 spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
332 spec |= PHY_M_FESC_SEL_CL_A;
333 gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
335 } else {
336 /* disable energy detect */
337 ctrl &= ~PHY_M_PC_EN_DET_MSK;
339 /* enable automatic crossover */
340 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
342 /* downshift on PHY 88E1112 and 88E1149 is changed */
343 if (sky2->autoneg == AUTONEG_ENABLE
344 && (hw->flags & SKY2_HW_NEWER_PHY)) {
345 /* set downshift counter to 3x and enable downshift */
346 ctrl &= ~PHY_M_PC_DSC_MSK;
347 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
350 } else {
351 /* workaround for deviation #4.88 (CRC errors) */
352 /* disable Automatic Crossover */
354 ctrl &= ~PHY_M_PC_MDIX_MSK;
357 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
359 /* special setup for PHY 88E1112 Fiber */
360 if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
361 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
363 /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
364 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
365 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
366 ctrl &= ~PHY_M_MAC_MD_MSK;
367 ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
368 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
370 if (hw->pmd_type == 'P') {
371 /* select page 1 to access Fiber registers */
372 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
374 /* for SFP-module set SIGDET polarity to low */
375 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
376 ctrl |= PHY_M_FIB_SIGD_POL;
377 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
380 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
383 ctrl = PHY_CT_RESET;
384 ct1000 = 0;
385 adv = PHY_AN_CSMA;
386 reg = 0;
388 if (sky2->autoneg == AUTONEG_ENABLE) {
389 if (sky2_is_copper(hw)) {
390 if (sky2->advertising & ADVERTISED_1000baseT_Full)
391 ct1000 |= PHY_M_1000C_AFD;
392 if (sky2->advertising & ADVERTISED_1000baseT_Half)
393 ct1000 |= PHY_M_1000C_AHD;
394 if (sky2->advertising & ADVERTISED_100baseT_Full)
395 adv |= PHY_M_AN_100_FD;
396 if (sky2->advertising & ADVERTISED_100baseT_Half)
397 adv |= PHY_M_AN_100_HD;
398 if (sky2->advertising & ADVERTISED_10baseT_Full)
399 adv |= PHY_M_AN_10_FD;
400 if (sky2->advertising & ADVERTISED_10baseT_Half)
401 adv |= PHY_M_AN_10_HD;
403 adv |= copper_fc_adv[sky2->flow_mode];
404 } else { /* special defines for FIBER (88E1040S only) */
405 if (sky2->advertising & ADVERTISED_1000baseT_Full)
406 adv |= PHY_M_AN_1000X_AFD;
407 if (sky2->advertising & ADVERTISED_1000baseT_Half)
408 adv |= PHY_M_AN_1000X_AHD;
410 adv |= fiber_fc_adv[sky2->flow_mode];
413 /* Restart Auto-negotiation */
414 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
415 } else {
416 /* forced speed/duplex settings */
417 ct1000 = PHY_M_1000C_MSE;
419 /* Disable auto update for duplex flow control and speed */
420 reg |= GM_GPCR_AU_ALL_DIS;
422 switch (sky2->speed) {
423 case SPEED_1000:
424 ctrl |= PHY_CT_SP1000;
425 reg |= GM_GPCR_SPEED_1000;
426 break;
427 case SPEED_100:
428 ctrl |= PHY_CT_SP100;
429 reg |= GM_GPCR_SPEED_100;
430 break;
433 if (sky2->duplex == DUPLEX_FULL) {
434 reg |= GM_GPCR_DUP_FULL;
435 ctrl |= PHY_CT_DUP_MD;
436 } else if (sky2->speed < SPEED_1000)
437 sky2->flow_mode = FC_NONE;
440 reg |= gm_fc_disable[sky2->flow_mode];
442 /* Forward pause packets to GMAC? */
443 if (sky2->flow_mode & FC_RX)
444 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
445 else
446 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
449 gma_write16(hw, port, GM_GP_CTRL, reg);
451 if (hw->flags & SKY2_HW_GIGABIT)
452 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
454 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
455 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
457 /* Setup Phy LED's */
458 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
459 ledover = 0;
461 switch (hw->chip_id) {
462 case CHIP_ID_YUKON_FE:
463 /* on 88E3082 these bits are at 11..9 (shifted left) */
464 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
466 ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
468 /* delete ACT LED control bits */
469 ctrl &= ~PHY_M_FELP_LED1_MSK;
470 /* change ACT LED control to blink mode */
471 ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
472 gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
473 break;
475 case CHIP_ID_YUKON_FE_P:
476 /* Enable Link Partner Next Page */
477 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
478 ctrl |= PHY_M_PC_ENA_LIP_NP;
480 /* disable Energy Detect and enable scrambler */
481 ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
482 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
484 /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
485 ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
486 PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
487 PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
489 gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
490 break;
492 case CHIP_ID_YUKON_XL:
493 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
495 /* select page 3 to access LED control register */
496 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
498 /* set LED Function Control register */
499 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
500 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
501 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
502 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
503 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
505 /* set Polarity Control register */
506 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
507 (PHY_M_POLC_LS1_P_MIX(4) |
508 PHY_M_POLC_IS0_P_MIX(4) |
509 PHY_M_POLC_LOS_CTRL(2) |
510 PHY_M_POLC_INIT_CTRL(2) |
511 PHY_M_POLC_STA1_CTRL(2) |
512 PHY_M_POLC_STA0_CTRL(2)));
514 /* restore page register */
515 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
516 break;
518 case CHIP_ID_YUKON_EC_U:
519 case CHIP_ID_YUKON_EX:
520 case CHIP_ID_YUKON_SUPR:
521 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
523 /* select page 3 to access LED control register */
524 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
526 /* set LED Function Control register */
527 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
528 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
529 PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
530 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
531 PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
533 /* set Blink Rate in LED Timer Control Register */
534 gm_phy_write(hw, port, PHY_MARV_INT_MASK,
535 ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
536 /* restore page register */
537 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
538 break;
540 default:
541 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
542 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
544 /* turn off the Rx LED (LED_RX) */
545 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
548 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
549 /* apply fixes in PHY AFE */
550 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
552 /* increase differential signal amplitude in 10BASE-T */
553 gm_phy_write(hw, port, 0x18, 0xaa99);
554 gm_phy_write(hw, port, 0x17, 0x2011);
556 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
557 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
558 gm_phy_write(hw, port, 0x18, 0xa204);
559 gm_phy_write(hw, port, 0x17, 0x2002);
562 /* set page register to 0 */
563 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
564 } else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
565 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
566 /* apply workaround for integrated resistors calibration */
567 gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
568 gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
569 } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
570 hw->chip_id < CHIP_ID_YUKON_SUPR) {
571 /* no effect on Yukon-XL */
572 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
574 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
575 /* turn on 100 Mbps LED (LED_LINK100) */
576 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
579 if (ledover)
580 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
584 /* Enable phy interrupt on auto-negotiation complete (or link up) */
585 if (sky2->autoneg == AUTONEG_ENABLE)
586 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
587 else
588 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
591 static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
592 static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
594 static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
596 u32 reg1;
598 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
599 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
600 reg1 &= ~phy_power[port];
602 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
603 reg1 |= coma_mode[port];
605 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
606 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
607 sky2_pci_read32(hw, PCI_DEV_REG1);
609 if (hw->chip_id == CHIP_ID_YUKON_FE)
610 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
611 else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
612 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
615 static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
617 u32 reg1;
618 u16 ctrl;
620 /* release GPHY Control reset */
621 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
623 /* release GMAC reset */
624 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
626 if (hw->flags & SKY2_HW_NEWER_PHY) {
627 /* select page 2 to access MAC control register */
628 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
630 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
631 /* allow GMII Power Down */
632 ctrl &= ~PHY_M_MAC_GMIF_PUP;
633 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
635 /* set page register back to 0 */
636 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
639 /* setup General Purpose Control Register */
640 gma_write16(hw, port, GM_GP_CTRL,
641 GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS);
643 if (hw->chip_id != CHIP_ID_YUKON_EC) {
644 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
645 /* select page 2 to access MAC control register */
646 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
648 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
649 /* enable Power Down */
650 ctrl |= PHY_M_PC_POW_D_ENA;
651 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
653 /* set page register back to 0 */
654 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
657 /* set IEEE compatible Power Down Mode (dev. #4.99) */
658 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
661 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
662 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
663 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
664 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
665 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
668 static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
670 if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
671 hw->chip_rev != CHIP_REV_YU_EX_A0) ||
672 hw->chip_id == CHIP_ID_YUKON_FE_P ||
673 hw->chip_id == CHIP_ID_YUKON_SUPR) {
674 /* disable jumbo frames on devices that support them */
675 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
676 TX_JUMBO_DIS | TX_STFW_ENA);
677 } else {
678 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
682 static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
684 u16 reg;
685 u32 rx_reg;
686 int i;
687 const u8 *addr = hw->dev[port]->ll_addr;
689 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
690 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
692 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
694 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
695 /* WA DEV_472 -- looks like crossed wires on port 2 */
696 /* clear GMAC 1 Control reset */
697 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
698 do {
699 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
700 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
701 } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
702 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
703 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
706 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
708 /* Enable Transmit FIFO Underrun */
709 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
711 sky2_phy_power_up(hw, port);
712 sky2_phy_init(hw, port);
714 /* MIB clear */
715 reg = gma_read16(hw, port, GM_PHY_ADDR);
716 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
718 for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
719 gma_read16(hw, port, i);
720 gma_write16(hw, port, GM_PHY_ADDR, reg);
722 /* transmit control */
723 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
725 /* receive control reg: unicast + multicast + no FCS */
726 gma_write16(hw, port, GM_RX_CTRL,
727 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
729 /* transmit flow control */
730 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
732 /* transmit parameter */
733 gma_write16(hw, port, GM_TX_PARAM,
734 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
735 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
736 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
737 TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
739 /* serial mode register */
740 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
741 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
743 gma_write16(hw, port, GM_SERIAL_MODE, reg);
745 /* virtual address for data */
746 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
748 /* physical address: used for pause frames */
749 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
751 /* ignore counter overflows */
752 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
753 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
754 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
756 /* Configure Rx MAC FIFO */
757 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
758 rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
759 if (hw->chip_id == CHIP_ID_YUKON_EX ||
760 hw->chip_id == CHIP_ID_YUKON_FE_P)
761 rx_reg |= GMF_RX_OVER_ON;
763 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
765 if (hw->chip_id == CHIP_ID_YUKON_XL) {
766 /* Hardware errata - clear flush mask */
767 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
768 } else {
769 /* Flush Rx MAC FIFO on any flow control or error */
770 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
773 /* Set threshold to 0xa (64 bytes) + 1 to work around pause bug */
774 reg = RX_GMF_FL_THR_DEF + 1;
775 /* Another magic mystery workaround from sk98lin */
776 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
777 hw->chip_rev == CHIP_REV_YU_FE2_A0)
778 reg = 0x178;
779 sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
781 /* Configure Tx MAC FIFO */
782 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
783 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
785 /* On chips without ram buffer, pause is controlled by MAC level */
786 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
787 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
788 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
790 sky2_set_tx_stfwd(hw, port);
793 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
794 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
795 /* disable dynamic watermark */
796 reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
797 reg &= ~TX_DYN_WM_ENA;
798 sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
802 /* Assign Ram Buffer allocation to queue */
803 static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
805 u32 end;
807 /* convert from K bytes to qwords used for hw register */
808 start *= 1024/8;
809 space *= 1024/8;
810 end = start + space - 1;
812 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
813 sky2_write32(hw, RB_ADDR(q, RB_START), start);
814 sky2_write32(hw, RB_ADDR(q, RB_END), end);
815 sky2_write32(hw, RB_ADDR(q, RB_WP), start);
816 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
818 if (q == Q_R1 || q == Q_R2) {
819 u32 tp = space - space/4;
821 /* On receive queues, set the thresholds
822 * give receiver priority when > 3/4 full
823 * send pause when down to 2K
825 sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
826 sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
828 tp = space - 2048/8;
829 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
830 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
831 } else {
832 /* Enable store & forward on Tx queues because
833 * Tx FIFO is only 1K on Yukon
835 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
838 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
839 sky2_read8(hw, RB_ADDR(q, RB_CTRL));
842 /* Setup Bus Memory Interface */
843 static void sky2_qset(struct sky2_hw *hw, u16 q)
845 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
846 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
847 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
848 sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
851 /* Setup prefetch unit registers. This is the interface between
852 * hardware and driver list elements
854 static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
855 u64 addr, u32 last)
857 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
858 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
859 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
860 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
861 sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
862 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
864 sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
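/* The final read back of PREF_UNIT_CTRL flushes the posted writes, so the
 * prefetch unit is fully programmed before list elements are handed to it. */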
867 static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
869 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
871 sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
872 le->ctrl = 0;
873 return le;
876 static void tx_init(struct sky2_port *sky2)
878 struct sky2_tx_le *le;
880 sky2->tx_prod = sky2->tx_cons = 0;
882 le = get_tx_le(sky2);
883 le->addr = 0;
884 le->opcode = OP_ADDR64 | HW_OWNER;
887 static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
888 struct sky2_tx_le *le)
890 return sky2->tx_ring + (le - sky2->tx_le);
893 /* Update chip's next pointer */
894 static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
896 /* Make sure writes to descriptors are complete before we tell hardware */
897 wmb();
898 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
899 DBGIO(PFX "queue %#x idx <- %d\n", q, idx);
903 static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
905 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
907 sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
908 le->ctrl = 0;
909 return le;
912 /* Build description to hardware for one receive segment */
913 static void sky2_rx_add(struct sky2_port *sky2, u8 op,
914 u32 map, unsigned len)
916 struct sky2_rx_le *le;
918 le = sky2_next_rx(sky2);
919 le->addr = cpu_to_le32(map);
920 le->length = cpu_to_le16(len);
921 le->opcode = op | HW_OWNER;
924 /* Build description to hardware for one possibly fragmented skb */
925 static void sky2_rx_submit(struct sky2_port *sky2,
926 const struct rx_ring_info *re)
928 sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
932 static void sky2_rx_map_iob(struct pci_device *pdev __unused,
933 struct rx_ring_info *re,
934 unsigned size __unused)
936 struct io_buffer *iob = re->iob;
937 re->data_addr = virt_to_bus(iob->data);
940 /* Disable checksum offloading.
942 static void rx_set_checksum(struct sky2_port *sky2)
944 struct sky2_rx_le *le = sky2_next_rx(sky2);
946 le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
947 le->ctrl = 0;
948 le->opcode = OP_TCPSTART | HW_OWNER;
950 sky2_write32(sky2->hw,
951 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
952 BMU_DIS_RX_CHKSUM);
956 * The RX Stop command will not work for Yukon-2 if the BMU does not
957 * reach the end of packet and since we can't make sure that we have
958 * incoming data, we must reset the BMU while it is not doing a DMA
959 * transfer. Since it is possible that the RX path is still active,
960 * the RX RAM buffer will be stopped first, so any possible incoming
961 * data will not trigger a DMA. After the RAM buffer is stopped, the
962 * BMU is polled until any DMA in progress is ended and only then it
963 * will be reset.
965 static void sky2_rx_stop(struct sky2_port *sky2)
967 struct sky2_hw *hw = sky2->hw;
968 unsigned rxq = rxqaddr[sky2->port];
969 int i;
971 /* disable the RAM Buffer receive queue */
972 sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
974 for (i = 0; i < 0xffff; i++)
975 if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
976 == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
977 goto stopped;
979 DBG(PFX "%s: receiver stop failed\n", sky2->netdev->name);
980 stopped:
981 sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
983 /* reset the Rx prefetch unit */
984 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
985 wmb();
988 /* Clean out receive buffer area, assumes receiver hardware stopped */
989 static void sky2_rx_clean(struct sky2_port *sky2)
991 unsigned i;
993 memset(sky2->rx_le, 0, RX_LE_BYTES);
994 for (i = 0; i < RX_PENDING; i++) {
995 struct rx_ring_info *re = sky2->rx_ring + i;
997 if (re->iob) {
998 free_iob(re->iob);
999 re->iob = NULL;
1005 * Allocate an iob for receiving.
1007 static struct io_buffer *sky2_rx_alloc(struct sky2_port *sky2)
1009 struct io_buffer *iob;
1011 iob = alloc_iob(sky2->rx_data_size + ETH_DATA_ALIGN);
1012 if (!iob)
1013 return NULL;
1016 * Cards with a RAM buffer hang in the rx FIFO if the
1017 * receive buffer isn't aligned to (Linux module comments say
1018 * 64 bytes, Linux module code says 8 bytes). Since io_buffers
1019 * are always 2kb-aligned under gPXE, just leave it be
1020 * without ETH_DATA_ALIGN in those cases.
1022 * XXX This causes unaligned access to the IP header,
1023 * which is undesirable, but it's less undesirable than the
1024 * card hanging.
1026 if (!(sky2->hw->flags & SKY2_HW_RAM_BUFFER)) {
1027 iob_reserve(iob, ETH_DATA_ALIGN);
1030 return iob;
1033 static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
1035 sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
1039 * Allocate and setup receiver buffer pool.
1040 * In the normal case this ends up creating one list element per packet
1041 * in the receive ring. One element is used for checksum
1042 * enable/disable, and one extra to avoid wrap.
1044 static int sky2_rx_start(struct sky2_port *sky2)
1046 struct sky2_hw *hw = sky2->hw;
1047 struct rx_ring_info *re;
1048 unsigned rxq = rxqaddr[sky2->port];
1049 unsigned i, size, thresh;
1051 sky2->rx_put = sky2->rx_next = 0;
1052 sky2_qset(hw, rxq);
1054 /* On PCI express lowering the watermark gives better performance */
1055 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
1056 sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
1058 /* These chips have no ram buffer?
1059 * MAC Rx RAM Read is controlled by hardware */
1060 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
1061 (hw->chip_rev == CHIP_REV_YU_EC_U_A1
1062 || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
1063 sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
1065 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
1067 if (!(hw->flags & SKY2_HW_NEW_LE))
1068 rx_set_checksum(sky2);
1070 /* Space needed for frame data + headers rounded up */
1071 size = (ETH_FRAME_LEN + 8) & ~7;
1073 /* Stopping point for hardware truncation */
1074 thresh = (size - 8) / sizeof(u32);
1076 sky2->rx_data_size = size;
1078 /* Fill Rx ring */
1079 for (i = 0; i < RX_PENDING; i++) {
1080 re = sky2->rx_ring + i;
1082 re->iob = sky2_rx_alloc(sky2);
1083 if (!re->iob)
1084 goto nomem;
1086 sky2_rx_map_iob(hw->pdev, re, sky2->rx_data_size);
1087 sky2_rx_submit(sky2, re);
1091 * The receiver hangs if it receives frames larger than the
1092 * packet buffer. As a workaround, truncate oversize frames, but
1093 * the register is limited to 9 bits, so if you do frames > 2052
1094 * you better get the MTU right!
1096 if (thresh > 0x1ff)
1097 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
1098 else {
1099 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
1100 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
1103 /* Tell chip about available buffers */
1104 sky2_rx_update(sky2, rxq);
1105 return 0;
1106 nomem:
1107 sky2_rx_clean(sky2);
1108 return -ENOMEM;
1111 /* Free the le and ring buffers */
1112 static void sky2_free_rings(struct sky2_port *sky2)
1114 free_dma(sky2->rx_le, RX_LE_BYTES);
1115 free(sky2->rx_ring);
1117 free_dma(sky2->tx_le, TX_RING_SIZE * sizeof(struct sky2_tx_le));
1118 free(sky2->tx_ring);
1120 sky2->tx_le = NULL;
1121 sky2->rx_le = NULL;
1123 sky2->rx_ring = NULL;
1124 sky2->tx_ring = NULL;
1127 /* Bring up network interface. */
1128 static int sky2_up(struct net_device *dev)
1130 struct sky2_port *sky2 = netdev_priv(dev);
1131 struct sky2_hw *hw = sky2->hw;
1132 unsigned port = sky2->port;
1133 u32 imask, ramsize;
1134 int err = -ENOMEM;
1136 netdev_link_down(dev);
1138 /* must be power of 2 */
1139 sky2->tx_le = malloc_dma(TX_RING_SIZE * sizeof(struct sky2_tx_le), TX_RING_ALIGN);
1140 sky2->tx_le_map = virt_to_bus(sky2->tx_le);
1141 if (!sky2->tx_le)
1142 goto err_out;
1143 memset(sky2->tx_le, 0, TX_RING_SIZE * sizeof(struct sky2_tx_le));
1145 sky2->tx_ring = zalloc(TX_RING_SIZE * sizeof(struct tx_ring_info));
1146 if (!sky2->tx_ring)
1147 goto err_out;
1149 tx_init(sky2);
1151 sky2->rx_le = malloc_dma(RX_LE_BYTES, RX_RING_ALIGN);
1152 sky2->rx_le_map = virt_to_bus(sky2->rx_le);
1153 if (!sky2->rx_le)
1154 goto err_out;
1155 memset(sky2->rx_le, 0, RX_LE_BYTES);
1157 sky2->rx_ring = zalloc(RX_PENDING * sizeof(struct rx_ring_info));
1158 if (!sky2->rx_ring)
1159 goto err_out;
1161 sky2_mac_init(hw, port);
1163 /* Register is number of 4K blocks on internal RAM buffer. */
1164 ramsize = sky2_read8(hw, B2_E_0) * 4;
1165 if (ramsize > 0) {
1166 u32 rxspace;
1168 hw->flags |= SKY2_HW_RAM_BUFFER;
1169 DBG2(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
1170 if (ramsize < 16)
1171 rxspace = ramsize / 2;
1172 else
1173 rxspace = 8 + (2*(ramsize - 16))/3;
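/* Worked example: a 48K RAM buffer yields rxspace = 8 + (2*32)/3 = 29K
 * for receive, leaving 48 - 29 = 19K for the transmit queue below. */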
1175 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1176 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
1178 /* Make sure SyncQ is disabled */
1179 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
1180 RB_RST_SET);
1183 sky2_qset(hw, txqaddr[port]);
1185 /* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
1186 if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
1187 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
1189 /* Set almost empty threshold */
1190 if (hw->chip_id == CHIP_ID_YUKON_EC_U
1191 && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
1192 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
1194 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1195 TX_RING_SIZE - 1);
1197 err = sky2_rx_start(sky2);
1198 if (err)
1199 goto err_out;
1201 /* Enable interrupts from phy/mac for port */
1202 imask = sky2_read32(hw, B0_IMSK);
1203 imask |= portirq_msk[port];
1204 sky2_write32(hw, B0_IMSK, imask);
1206 DBGIO(PFX "%s: le bases: st %p [%x], rx %p [%x], tx %p [%x]\n",
1207 dev->name, hw->st_le, hw->st_dma, sky2->rx_le, sky2->rx_le_map,
1208 sky2->tx_le, sky2->tx_le_map);
1210 sky2_set_multicast(dev);
1211 return 0;
1213 err_out:
1214 sky2_free_rings(sky2);
1215 return err;
1218 /* Modular subtraction in ring */
1219 static inline int tx_dist(unsigned tail, unsigned head)
1221 return (head - tail) & (TX_RING_SIZE - 1);
1224 /* Number of list elements available for next tx */
1225 static inline int tx_avail(const struct sky2_port *sky2)
1227 return TX_PENDING - tx_dist(sky2->tx_cons, sky2->tx_prod);
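/* Example: tx_cons == 10 and tx_prod == 8 means the producer has wrapped,
 * so tx_dist(10, 8) == (8 - 10) & 127 == 126 elements are still queued and
 * tx_avail() == TX_PENDING - 126 == 1. */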
1232 * Put one packet in ring for transmit.
1233 * A single packet can generate multiple list elements, and
1234 * the number of ring elements will probably be less than the number
1235 * of list elements used.
1237 static int sky2_xmit_frame(struct net_device *dev, struct io_buffer *iob)
1239 struct sky2_port *sky2 = netdev_priv(dev);
1240 struct sky2_hw *hw = sky2->hw;
1241 struct sky2_tx_le *le = NULL;
1242 struct tx_ring_info *re;
1243 unsigned len;
1244 u32 mapping;
1245 u8 ctrl;
1247 if (tx_avail(sky2) < 1)
1248 return -EBUSY;
1250 len = iob_len(iob);
1251 mapping = virt_to_bus(iob->data);
1253 DBGIO(PFX "%s: tx queued, slot %d, len %d\n", dev->name,
1254 sky2->tx_prod, len);
1256 ctrl = 0;
1258 le = get_tx_le(sky2);
1259 le->addr = cpu_to_le32((u32) mapping);
1260 le->length = cpu_to_le16(len);
1261 le->ctrl = ctrl;
1262 le->opcode = (OP_PACKET | HW_OWNER);
1264 re = tx_le_re(sky2, le);
1265 re->iob = iob;
1267 le->ctrl |= EOP;
1269 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1271 return 0;
1275 * Free ring elements starting at tx_cons until "done"
1277 * NB: the hardware will tell us about partial completion of multi-part
1278 * buffers so make sure not to free iob too early.
1280 static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1282 struct net_device *dev = sky2->netdev;
1283 unsigned idx;
1285 assert(done < TX_RING_SIZE);
1287 for (idx = sky2->tx_cons; idx != done;
1288 idx = RING_NEXT(idx, TX_RING_SIZE)) {
1289 struct sky2_tx_le *le = sky2->tx_le + idx;
1290 struct tx_ring_info *re = sky2->tx_ring + idx;
1292 if (le->ctrl & EOP) {
1293 DBGIO(PFX "%s: tx done %d\n", dev->name, idx);
1294 netdev_tx_complete(dev, re->iob);
1298 sky2->tx_cons = idx;
1299 mb();
1302 /* Cleanup all untransmitted buffers, assume transmitter not running */
1303 static void sky2_tx_clean(struct net_device *dev)
1305 struct sky2_port *sky2 = netdev_priv(dev);
1307 sky2_tx_complete(sky2, sky2->tx_prod);
1310 /* Network shutdown */
1311 static void sky2_down(struct net_device *dev)
1313 struct sky2_port *sky2 = netdev_priv(dev);
1314 struct sky2_hw *hw = sky2->hw;
1315 unsigned port = sky2->port;
1316 u16 ctrl;
1317 u32 imask;
1319 /* Never really got started! */
1320 if (!sky2->tx_le)
1321 return;
1323 DBG2(PFX "%s: disabling interface\n", dev->name);
1325 /* Disable port IRQ */
1326 imask = sky2_read32(hw, B0_IMSK);
1327 imask &= ~portirq_msk[port];
1328 sky2_write32(hw, B0_IMSK, imask);
1330 sky2_gmac_reset(hw, port);
1332 /* Stop transmitter */
1333 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
1334 sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
1336 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1337 RB_RST_SET | RB_DIS_OP_MD);
1339 ctrl = gma_read16(hw, port, GM_GP_CTRL);
1340 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
1341 gma_write16(hw, port, GM_GP_CTRL, ctrl);
1343 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1345 /* Workaround shared GMAC reset */
1346 if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
1347 && port == 0 && hw->dev[1]))
1348 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1350 /* Disable Force Sync bit and Enable Alloc bit */
1351 sky2_write8(hw, SK_REG(port, TXA_CTRL),
1352 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1354 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1355 sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
1356 sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1358 /* Reset the PCI FIFO of the async Tx queue */
1359 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
1360 BMU_RST_SET | BMU_FIFO_RST);
1362 /* Reset the Tx prefetch units */
1363 sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
1364 PREF_UNIT_RST_SET);
1366 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1368 sky2_rx_stop(sky2);
1370 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1371 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1373 sky2_phy_power_down(hw, port);
1375 /* turn off LED's */
1376 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1378 sky2_tx_clean(dev);
1379 sky2_rx_clean(sky2);
1381 sky2_free_rings(sky2);
1383 return;
1386 static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
1388 if (hw->flags & SKY2_HW_FIBRE_PHY)
1389 return SPEED_1000;
1391 if (!(hw->flags & SKY2_HW_GIGABIT)) {
1392 if (aux & PHY_M_PS_SPEED_100)
1393 return SPEED_100;
1394 else
1395 return SPEED_10;
1398 switch (aux & PHY_M_PS_SPEED_MSK) {
1399 case PHY_M_PS_SPEED_1000:
1400 return SPEED_1000;
1401 case PHY_M_PS_SPEED_100:
1402 return SPEED_100;
1403 default:
1404 return SPEED_10;
1408 static void sky2_link_up(struct sky2_port *sky2)
1410 struct sky2_hw *hw = sky2->hw;
1411 unsigned port = sky2->port;
1412 u16 reg;
1413 static const char *fc_name[] = {
1414 [FC_NONE] = "none",
1415 [FC_TX] = "tx",
1416 [FC_RX] = "rx",
1417 [FC_BOTH] = "both",
1420 /* enable Rx/Tx */
1421 reg = gma_read16(hw, port, GM_GP_CTRL);
1422 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1423 gma_write16(hw, port, GM_GP_CTRL, reg);
1425 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1427 netdev_link_up(sky2->netdev);
1429 /* Turn on link LED */
1430 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1431 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1433 DBG(PFX "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
1434 sky2->netdev->name, sky2->speed,
1435 sky2->duplex == DUPLEX_FULL ? "full" : "half",
1436 fc_name[sky2->flow_status]);
1439 static void sky2_link_down(struct sky2_port *sky2)
1441 struct sky2_hw *hw = sky2->hw;
1442 unsigned port = sky2->port;
1443 u16 reg;
1445 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1447 reg = gma_read16(hw, port, GM_GP_CTRL);
1448 reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1449 gma_write16(hw, port, GM_GP_CTRL, reg);
1451 netdev_link_down(sky2->netdev);
1453 /* Turn on link LED */
1454 sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
1456 DBG(PFX "%s: Link is down.\n", sky2->netdev->name);
1458 sky2_phy_init(hw, port);
1461 static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1463 struct sky2_hw *hw = sky2->hw;
1464 unsigned port = sky2->port;
1465 u16 advert, lpa;
1467 advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
1468 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
1469 if (lpa & PHY_M_AN_RF) {
1470 DBG(PFX "%s: remote fault\n", sky2->netdev->name);
1471 return -1;
1474 if (!(aux & PHY_M_PS_SPDUP_RES)) {
1475 DBG(PFX "%s: speed/duplex mismatch\n", sky2->netdev->name);
1476 return -1;
1479 sky2->speed = sky2_phy_speed(hw, aux);
1480 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1482 /* Since the pause result bits seem to be in different positions on
1483 * different chips, look at the registers.
1486 sky2->flow_status = FC_NONE;
1487 if (advert & ADVERTISE_PAUSE_CAP) {
1488 if (lpa & LPA_PAUSE_CAP)
1489 sky2->flow_status = FC_BOTH;
1490 else if (advert & ADVERTISE_PAUSE_ASYM)
1491 sky2->flow_status = FC_RX;
1492 } else if (advert & ADVERTISE_PAUSE_ASYM) {
1493 if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
1494 sky2->flow_status = FC_TX;
1497 if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
1498 && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
1499 sky2->flow_status = FC_NONE;
1501 if (sky2->flow_status & FC_TX)
1502 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1503 else
1504 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1506 return 0;
1509 /* Interrupt from PHY */
1510 static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
1512 struct net_device *dev = hw->dev[port];
1513 struct sky2_port *sky2 = netdev_priv(dev);
1514 u16 istatus, phystat;
1516 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
1517 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
1519 DBGIO(PFX "%s: phy interrupt status 0x%x 0x%x\n",
1520 sky2->netdev->name, istatus, phystat);
1522 if (sky2->autoneg == AUTONEG_ENABLE && (istatus & PHY_M_IS_AN_COMPL)) {
1523 if (sky2_autoneg_done(sky2, phystat) == 0)
1524 sky2_link_up(sky2);
1525 return;
1528 if (istatus & PHY_M_IS_LSP_CHANGE)
1529 sky2->speed = sky2_phy_speed(hw, phystat);
1531 if (istatus & PHY_M_IS_DUP_CHANGE)
1532 sky2->duplex =
1533 (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1535 if (istatus & PHY_M_IS_LST_CHANGE) {
1536 if (phystat & PHY_M_PS_LINK_UP)
1537 sky2_link_up(sky2);
1538 else
1539 sky2_link_down(sky2);
1543 /* Normal packet - take iob from ring element and put in a new one */
1544 static struct io_buffer *receive_new(struct sky2_port *sky2,
1545 struct rx_ring_info *re,
1546 unsigned int length)
1548 struct io_buffer *iob, *niob;
1549 unsigned hdr_space = sky2->rx_data_size;
1551 /* Don't be tricky about reusing pages (yet) */
1552 niob = sky2_rx_alloc(sky2);
1553 if (!niob)
1554 return NULL;
1556 iob = re->iob;
1558 re->iob = niob;
1559 sky2_rx_map_iob(sky2->hw->pdev, re, hdr_space);
1561 iob_put(iob, length);
1562 return iob;
1566 * Receive one packet.
1567 * For larger packets, get new buffer.
1569 static struct io_buffer *sky2_receive(struct net_device *dev,
1570 u16 length, u32 status)
1572 struct sky2_port *sky2 = netdev_priv(dev);
1573 struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
1574 struct io_buffer *iob = NULL;
1575 u16 count = (status & GMR_FS_LEN) >> 16;
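/* count is the frame length seen by the MAC (the GMR_FS_LEN field in the
 * upper bits of the status word); it is checked against the DMA length below. */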
1577 DBGIO(PFX "%s: rx slot %d status 0x%x len %d\n",
1578 dev->name, sky2->rx_next, status, length);
1580 sky2->rx_next = (sky2->rx_next + 1) % RX_PENDING;
1582 /* This chip has hardware problems that generate bogus status.
1583 * So do only marginal checking and expect higher level protocols
1584 * to handle crap frames.
1586 if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
1587 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
1588 length == count)
1589 goto okay;
1591 if (status & GMR_FS_ANY_ERR)
1592 goto error;
1594 if (!(status & GMR_FS_RX_OK))
1595 goto resubmit;
1597 /* if length reported by DMA does not match PHY, packet was truncated */
1598 if (length != count)
1599 goto len_error;
1601 okay:
1602 iob = receive_new(sky2, re, length);
1603 resubmit:
1604 sky2_rx_submit(sky2, re);
1606 return iob;
1608 len_error:
1609 /* Truncation of overlength packets
1610 causes PHY length to not match MAC length */
1611 DBG2(PFX "%s: rx length error: status %#x length %d\n",
1612 dev->name, status, length);
1614 /* Pass NULL as iob because we want to keep our iob in the
1615 ring for the next packet. */
1616 netdev_rx_err(dev, NULL, -EINVAL);
1617 goto resubmit;
1619 error:
1620 if (status & GMR_FS_RX_FF_OV) {
1621 DBG2(PFX "%s: FIFO overflow error\n", dev->name);
1622 netdev_rx_err(dev, NULL, -EBUSY);
1623 goto resubmit;
1626 DBG2(PFX "%s: rx error, status 0x%x length %d\n",
1627 dev->name, status, length);
1628 netdev_rx_err(dev, NULL, -EIO);
1630 goto resubmit;
1633 /* Transmit complete */
1634 static inline void sky2_tx_done(struct net_device *dev, u16 last)
1636 struct sky2_port *sky2 = netdev_priv(dev);
1638 sky2_tx_complete(sky2, last);
1641 /* Process status response ring */
1642 static void sky2_status_intr(struct sky2_hw *hw, u16 idx)
1644 unsigned rx[2] = { 0, 0 };
1646 rmb();
1647 do {
1648 struct sky2_port *sky2;
1649 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1650 unsigned port;
1651 struct net_device *dev;
1652 struct io_buffer *iob;
1653 u32 status;
1654 u16 length;
1655 u8 opcode = le->opcode;
1657 if (!(opcode & HW_OWNER))
1658 break;
1660 port = le->css & CSS_LINK_BIT;
1661 dev = hw->dev[port];
1662 sky2 = netdev_priv(dev);
1663 length = le16_to_cpu(le->length);
1664 status = le32_to_cpu(le->status);
1666 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
1668 le->opcode = 0;
1669 switch (opcode & ~HW_OWNER) {
1670 case OP_RXSTAT:
1671 ++rx[port];
1672 iob = sky2_receive(dev, length, status);
1673 if (!iob) {
1674 netdev_rx_err(dev, NULL, -ENOMEM);
1675 break;
1678 netdev_rx(dev, iob);
1679 break;
1681 case OP_RXCHKS:
1682 DBG2(PFX "status OP_RXCHKS but checksum offloading disabled\n");
1683 break;
1685 case OP_TXINDEXLE:
1686 /* TX index reports status for both ports */
1687 assert(TX_RING_SIZE <= 0x1000);
1688 sky2_tx_done(hw->dev[0], status & 0xfff);
1689 if (hw->dev[1])
1690 sky2_tx_done(hw->dev[1],
1691 ((status >> 24) & 0xff)
1692 | (u16)(length & 0xf) << 8);
1693 break;
1695 default:
1696 DBG(PFX "unknown status opcode 0x%x\n", opcode);
1698 } while (hw->st_idx != idx);
1700 /* Fully processed status ring so clear irq */
1701 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1703 if (rx[0])
1704 sky2_rx_update(netdev_priv(hw->dev[0]), Q_R1);
1706 if (rx[1])
1707 sky2_rx_update(netdev_priv(hw->dev[1]), Q_R2);
1710 static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
1712 struct net_device *dev = hw->dev[port];
1714 DBGIO(PFX "%s: hw error interrupt status 0x%x\n", dev->name, status);
1716 if (status & Y2_IS_PAR_RD1) {
1717 DBG(PFX "%s: ram data read parity error\n", dev->name);
1718 /* Clear IRQ */
1719 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
1722 if (status & Y2_IS_PAR_WR1) {
1723 DBG(PFX "%s: ram data write parity error\n", dev->name);
1724 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
1727 if (status & Y2_IS_PAR_MAC1) {
1728 DBG(PFX "%s: MAC parity error\n", dev->name);
1729 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
1732 if (status & Y2_IS_PAR_RX1) {
1733 DBG(PFX "%s: RX parity error\n", dev->name);
1734 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
1737 if (status & Y2_IS_TCP_TXA1) {
1738 DBG(PFX "%s: TCP segmentation error\n", dev->name);
1739 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
1743 static void sky2_hw_intr(struct sky2_hw *hw)
1745 u32 status = sky2_read32(hw, B0_HWE_ISRC);
1746 u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
1748 status &= hwmsk;
1750 if (status & Y2_IS_TIST_OV)
1751 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1753 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
1754 u16 pci_err;
1756 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1757 pci_err = sky2_pci_read16(hw, PCI_STATUS);
1758 DBG(PFX "PCI hardware error (0x%x)\n", pci_err);
1760 sky2_pci_write16(hw, PCI_STATUS,
1761 pci_err | PCI_STATUS_ERROR_BITS);
1762 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1765 if (status & Y2_IS_PCI_EXP) {
1766 /* PCI-Express uncorrectable Error occurred */
1767 u32 err;
1769 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1770 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
1771 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
1772 0xfffffffful);
1773 DBG(PFX "PCI-Express error (0x%x)\n", err);
1775 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
1776 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1779 if (status & Y2_HWE_L1_MASK)
1780 sky2_hw_error(hw, 0, status);
1781 status >>= 8;
1782 if (status & Y2_HWE_L1_MASK)
1783 sky2_hw_error(hw, 1, status);
1786 static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
1788 struct net_device *dev = hw->dev[port];
1789 u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
1791 DBGIO(PFX "%s: mac interrupt status 0x%x\n", dev->name, status);
1793 if (status & GM_IS_RX_CO_OV)
1794 gma_read16(hw, port, GM_RX_IRQ_SRC);
1796 if (status & GM_IS_TX_CO_OV)
1797 gma_read16(hw, port, GM_TX_IRQ_SRC);
1799 if (status & GM_IS_RX_FF_OR) {
1800 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
1803 if (status & GM_IS_TX_FF_UR) {
1804 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
1808 /* This should never happen; it is a bug. */
1809 static void sky2_le_error(struct sky2_hw *hw, unsigned port,
1810 u16 q, unsigned ring_size __unused)
1812 struct net_device *dev = hw->dev[port];
1813 struct sky2_port *sky2 = netdev_priv(dev);
1814 int idx;
1815 const u64 *le = (q == Q_R1 || q == Q_R2)
1816 ? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le;
1818 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
1819 DBG(PFX "%s: descriptor error q=%#x get=%d [%llx] last=%d put=%d should be %d\n",
1820 dev->name, (unsigned) q, idx, (unsigned long long) le[idx],
1821 (int) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_LAST_IDX)),
1822 (int) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)),
1823 le == (u64 *)sky2->rx_le? sky2->rx_put : sky2->tx_prod);
1825 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
1828 /* Hardware/software error handling */
1829 static void sky2_err_intr(struct sky2_hw *hw, u32 status)
1831 DBG(PFX "error interrupt status=%#x\n", status);
1833 if (status & Y2_IS_HW_ERR)
1834 sky2_hw_intr(hw);
1836 if (status & Y2_IS_IRQ_MAC1)
1837 sky2_mac_intr(hw, 0);
1839 if (status & Y2_IS_IRQ_MAC2)
1840 sky2_mac_intr(hw, 1);
1842 if (status & Y2_IS_CHK_RX1)
1843 sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE);
1845 if (status & Y2_IS_CHK_RX2)
1846 sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE);
1848 if (status & Y2_IS_CHK_TXA1)
1849 sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE);
1851 if (status & Y2_IS_CHK_TXA2)
1852 sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE);
1855 static void sky2_poll(struct net_device *dev)
1857 struct sky2_port *sky2 = netdev_priv(dev);
1858 struct sky2_hw *hw = sky2->hw;
1859 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
1860 u16 idx;
1862 if (status & Y2_IS_ERROR)
1863 sky2_err_intr(hw, status);
1865 if (status & Y2_IS_IRQ_PHY1)
1866 sky2_phy_intr(hw, 0);
1868 if (status & Y2_IS_IRQ_PHY2)
1869 sky2_phy_intr(hw, 1);
1871 while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
1872 sky2_status_intr(hw, idx);
1875 /* Bug/Errata workaround?
1876 * Need to kick the TX irq moderation timer.
1878 if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
1879 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1880 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1882 sky2_read32(hw, B0_Y2_SP_LISR);
1885 /* Chip internal frequency for clock calculations */
1886 static u32 sky2_mhz(const struct sky2_hw *hw)
1888 switch (hw->chip_id) {
1889 case CHIP_ID_YUKON_EC:
1890 case CHIP_ID_YUKON_EC_U:
1891 case CHIP_ID_YUKON_EX:
1892 case CHIP_ID_YUKON_SUPR:
1893 case CHIP_ID_YUKON_UL_2:
1894 return 125;
1896 case CHIP_ID_YUKON_FE:
1897 return 100;
1899 case CHIP_ID_YUKON_FE_P:
1900 return 50;
1902 case CHIP_ID_YUKON_XL:
1903 return 156;
1905 default:
1906 DBG(PFX "unknown chip ID!\n");
1907 return 100; /* bogus */
1911 static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
1913 return sky2_mhz(hw) * us;
1916 static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
1918 return clk / sky2_mhz(hw);
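/* Example: on a 125 MHz chip such as Yukon-EC, sky2_us2clk(hw, 8) == 1000
 * clock ticks and sky2_clk2us(hw, 1000) == 8 us. */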
1921 static int sky2_init(struct sky2_hw *hw)
1923 u8 t8;
1925 /* Enable all clocks and check for bad PCI access */
1926 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
1928 sky2_write8(hw, B0_CTST, CS_RST_CLR);
1930 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
1931 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
1933 switch(hw->chip_id) {
1934 case CHIP_ID_YUKON_XL:
1935 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
1936 break;
1938 case CHIP_ID_YUKON_EC_U:
1939 hw->flags = SKY2_HW_GIGABIT
1940 | SKY2_HW_NEWER_PHY
1941 | SKY2_HW_ADV_POWER_CTL;
1942 break;
1944 case CHIP_ID_YUKON_EX:
1945 hw->flags = SKY2_HW_GIGABIT
1946 | SKY2_HW_NEWER_PHY
1947 | SKY2_HW_NEW_LE
1948 | SKY2_HW_ADV_POWER_CTL;
1949 break;
1951 case CHIP_ID_YUKON_EC:
1952 /* This rev is really old, and requires untested workarounds */
1953 if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
1954 DBG(PFX "unsupported revision Yukon-EC rev A1\n");
1955 return -EOPNOTSUPP;
1956 }
1957 hw->flags = SKY2_HW_GIGABIT;
1958 break;
1960 case CHIP_ID_YUKON_FE:
1961 break;
1963 case CHIP_ID_YUKON_FE_P:
1964 hw->flags = SKY2_HW_NEWER_PHY
1965 | SKY2_HW_NEW_LE
1966 | SKY2_HW_AUTO_TX_SUM
1967 | SKY2_HW_ADV_POWER_CTL;
1968 break;
1970 case CHIP_ID_YUKON_SUPR:
1971 hw->flags = SKY2_HW_GIGABIT
1972 | SKY2_HW_NEWER_PHY
1973 | SKY2_HW_NEW_LE
1974 | SKY2_HW_AUTO_TX_SUM
1975 | SKY2_HW_ADV_POWER_CTL;
1976 break;
1978 case CHIP_ID_YUKON_UL_2:
1979 hw->flags = SKY2_HW_GIGABIT
1980 | SKY2_HW_ADV_POWER_CTL;
1981 break;
1983 default:
1984 DBG(PFX "unsupported chip type 0x%x\n", hw->chip_id);
1985 return -EOPNOTSUPP;
1986 }
1988 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
1989 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
1990 hw->flags |= SKY2_HW_FIBRE_PHY;
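/* Assume a single port; report a second one only if the chip has dual
 * MACs and the clock-gating register does not flag link 2 as inactive. */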
1992 hw->ports = 1;
1993 t8 = sky2_read8(hw, B2_Y2_HW_RES);
1994 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
1995 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1996 ++hw->ports;
1997 }
1999 return 0;
2000 }
2002 static void sky2_reset(struct sky2_hw *hw)
2003 {
2004 u16 status;
2005 int i, cap;
2006 u32 hwe_mask = Y2_HWE_ALL_MASK;
2008 /* disable ASF */
2009 if (hw->chip_id == CHIP_ID_YUKON_EX) {
2010 status = sky2_read16(hw, HCU_CCSR);
2011 status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
2012 HCU_CCSR_UC_STATE_MSK);
2013 sky2_write16(hw, HCU_CCSR, status);
2014 } else
2015 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2016 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2018 /* do a SW reset */
2019 sky2_write8(hw, B0_CTST, CS_RST_SET);
2020 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2022 /* allow writes to PCI config */
2023 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2025 /* clear PCI errors, if any */
2026 status = sky2_pci_read16(hw, PCI_STATUS);
2027 status |= PCI_STATUS_ERROR_BITS;
2028 sky2_pci_write16(hw, PCI_STATUS, status);
2030 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
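/* For PCI Express devices, clear any latched uncorrectable error status
 * via the AER registers before deciding whether to unmask PCI Express
 * error interrupts. */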
2032 cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
2033 if (cap) {
2034 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2035 0xfffffffful);
2037 /* If an error bit is stuck on, ignore it */
2038 if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
2039 DBG(PFX "ignoring stuck error report bit\n");
2040 else
2041 hwe_mask |= Y2_IS_PCI_EXP;
2042 }
2044 sky2_power_on(hw);
2045 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2047 for (i = 0; i < hw->ports; i++) {
2048 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2049 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
2051 if (hw->chip_id == CHIP_ID_YUKON_EX ||
2052 hw->chip_id == CHIP_ID_YUKON_SUPR)
2053 sky2_write16(hw, SK_REG(i, GMAC_CTRL),
2054 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
2055 | GMC_BYP_RETR_ON);
2056 }
2058 /* Clear I2C IRQ noise */
2059 sky2_write32(hw, B2_I2C_IRQ, 1);
2061 /* turn off hardware timer (unused) */
2062 sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
2063 sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
2065 sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
2067 /* Turn off descriptor polling */
2068 sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
2070 /* Turn off receive timestamp */
2071 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
2072 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2074 /* enable the Tx Arbiters */
2075 for (i = 0; i < hw->ports; i++)
2076 sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
2078 /* Initialize ram interface */
2079 for (i = 0; i < hw->ports; i++) {
2080 sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
2082 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
2083 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
2084 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
2085 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
2086 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
2087 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
2088 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
2089 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
2090 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
2091 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
2092 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
2093 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
2094 }
2096 sky2_write32(hw, B0_HWE_IMSK, hwe_mask);
2098 for (i = 0; i < hw->ports; i++)
2099 sky2_gmac_reset(hw, i);
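/* Reset the status unit, point it at the status list element ring, and
 * program its watermarks and interrupt moderation timers. */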
2101 memset(hw->st_le, 0, STATUS_LE_BYTES);
2102 hw->st_idx = 0;
2104 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
2105 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
2107 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
2108 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
2110 /* Set the list last index */
2111 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
2113 sky2_write16(hw, STAT_TX_IDX_TH, 10);
2114 sky2_write8(hw, STAT_FIFO_WM, 16);
2116 /* set Status-FIFO ISR watermark */
2117 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2118 sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
2119 else
2120 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2122 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
2123 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
2124 sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
2126 /* enable status unit */
2127 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
2129 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2130 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2131 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2132 }
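/* Report the link modes this board can advertise, based on whether the
 * PHY is copper or fibre and whether the MAC is gigabit-capable. */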
2134 static u32 sky2_supported_modes(const struct sky2_hw *hw)
2135 {
2136 if (sky2_is_copper(hw)) {
2137 u32 modes = SUPPORTED_10baseT_Half
2138 | SUPPORTED_10baseT_Full
2139 | SUPPORTED_100baseT_Half
2140 | SUPPORTED_100baseT_Full
2141 | SUPPORTED_Autoneg | SUPPORTED_TP;
2143 if (hw->flags & SKY2_HW_GIGABIT)
2144 modes |= SUPPORTED_1000baseT_Half
2145 | SUPPORTED_1000baseT_Full;
2146 return modes;
2147 } else
2148 return SUPPORTED_1000baseT_Half
2149 | SUPPORTED_1000baseT_Full
2150 | SUPPORTED_Autoneg
2151 | SUPPORTED_FIBRE;
2152 }
2154 static void sky2_set_multicast(struct net_device *dev)
2155 {
2156 struct sky2_port *sky2 = netdev_priv(dev);
2157 struct sky2_hw *hw = sky2->hw;
2158 unsigned port = sky2->port;
2159 u16 reg;
2160 u8 filter[8];
2161 int rx_pause;
2163 rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
2165 reg = gma_read16(hw, port, GM_RX_CTRL);
2166 reg |= GM_RXCR_UCF_ENA;
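/* gPXE keeps no multicast list: setting every bit in the hash filter
 * registers accepts all multicast frames. */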
2168 memset(filter, 0xff, sizeof(filter));
2170 gma_write16(hw, port, GM_MC_ADDR_H1,
2171 (u16) filter[0] | ((u16) filter[1] << 8));
2172 gma_write16(hw, port, GM_MC_ADDR_H2,
2173 (u16) filter[2] | ((u16) filter[3] << 8));
2174 gma_write16(hw, port, GM_MC_ADDR_H3,
2175 (u16) filter[4] | ((u16) filter[5] << 8));
2176 gma_write16(hw, port, GM_MC_ADDR_H4,
2177 (u16) filter[6] | ((u16) filter[7] << 8));
2179 gma_write16(hw, port, GM_RX_CTRL, reg);
2180 }
2182 /* Initialize network device */
2183 static struct net_device *sky2_init_netdev(struct sky2_hw *hw,
2184 unsigned port)
2185 {
2186 struct sky2_port *sky2;
2187 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
2189 if (!dev) {
2190 DBG(PFX "etherdev alloc failed\n");
2191 return NULL;
2192 }
2194 dev->dev = &hw->pdev->dev;
2196 sky2 = netdev_priv(dev);
2197 sky2->netdev = dev;
2198 sky2->hw = hw;
2200 /* Auto speed and flow control */
2201 sky2->autoneg = AUTONEG_ENABLE;
2202 sky2->flow_mode = FC_BOTH;
2204 sky2->duplex = -1;
2205 sky2->speed = -1;
2206 sky2->advertising = sky2_supported_modes(hw);
2208 hw->dev[port] = dev;
2210 sky2->port = port;
2212 /* read the mac address */
2213 memcpy(dev->hw_addr, (void *)(hw->regs + B2_MAC_1 + port * 8), ETH_ALEN);
2215 return dev;
2216 }
2218 static void sky2_show_addr(struct net_device *dev)
2219 {
2220 DBG2(PFX "%s: addr %s\n", dev->name, netdev_addr(dev));
2221 }
2223 #if DBGLVL_MAX
2224 /* This driver supports the Yukon-2 chipset only */
2225 static const char *sky2_name(u8 chipid, char *buf, int sz)
2226 {
2227 const char *name[] = {
2228 "XL", /* 0xb3 */
2229 "EC Ultra", /* 0xb4 */
2230 "Extreme", /* 0xb5 */
2231 "EC", /* 0xb6 */
2232 "FE", /* 0xb7 */
2233 "FE+", /* 0xb8 */
2234 "Supreme", /* 0xb9 */
2235 "UL 2", /* 0xba */
2238 if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_UL_2)
2239 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
2240 else
2241 snprintf(buf, sz, "(chip %#x)", chipid);
2242 return buf;
2243 }
2244 #endif
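/* Enable or disable this port's bit in the chip's global interrupt mask. */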
2246 static void sky2_net_irq(struct net_device *dev, int enable)
2247 {
2248 struct sky2_port *sky2 = netdev_priv(dev);
2249 struct sky2_hw *hw = sky2->hw;
2251 u32 imask = sky2_read32(hw, B0_IMSK);
2252 if (enable)
2253 imask |= portirq_msk[sky2->port];
2254 else
2255 imask &= ~portirq_msk[sky2->port];
2256 sky2_write32(hw, B0_IMSK, imask);
2257 }
2259 static struct net_device_operations sky2_operations = {
2260 .open = sky2_up,
2261 .close = sky2_down,
2262 .transmit = sky2_xmit_frame,
2263 .poll = sky2_poll,
2264 .irq = sky2_net_irq
2265 };
2267 static int sky2_probe(struct pci_device *pdev,
2268 const struct pci_device_id *ent __unused)
2269 {
2270 struct net_device *dev;
2271 struct sky2_hw *hw;
2272 int err;
2273 char buf1[16] __unused; /* only for debugging */
2275 adjust_pci_device(pdev);
2277 err = -ENOMEM;
2278 hw = zalloc(sizeof(*hw));
2279 if (!hw) {
2280 DBG(PFX "cannot allocate hardware struct\n");
2281 goto err_out;
2282 }
2284 hw->pdev = pdev;
2286 hw->regs = (unsigned long)ioremap(pci_bar_start(pdev, PCI_BASE_ADDRESS_0), 0x4000);
2287 if (!hw->regs) {
2288 DBG(PFX "cannot map device registers\n");
2289 goto err_out_free_hw;
2290 }
2292 /* ring for status responses */
2293 hw->st_le = malloc_dma(STATUS_LE_BYTES, STATUS_RING_ALIGN);
2294 if (!hw->st_le)
2295 goto err_out_iounmap;
2296 hw->st_dma = virt_to_bus(hw->st_le);
2297 memset(hw->st_le, 0, STATUS_LE_BYTES);
2299 err = sky2_init(hw);
2300 if (err)
2301 goto err_out_iounmap;
2303 #if DBGLVL_MAX
2304 DBG2(PFX "Yukon-2 %s chip revision %d\n",
2305 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
2306 #endif
2308 sky2_reset(hw);
2310 dev = sky2_init_netdev(hw, 0);
2311 if (!dev) {
2312 err = -ENOMEM;
2313 goto err_out_free_pci;
2314 }
2316 netdev_init(dev, &sky2_operations);
2318 err = register_netdev(dev);
2319 if (err) {
2320 DBG(PFX "cannot register net device\n");
2321 goto err_out_free_netdev;
2322 }
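/* Unmask the base interrupt sources (Y2_IS_BASE); the per-port bits are
 * enabled later by sky2_net_irq(). */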
2324 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
2326 sky2_show_addr(dev);
2328 if (hw->ports > 1) {
2329 struct net_device *dev1;
2331 dev1 = sky2_init_netdev(hw, 1);
2332 if (!dev1)
2333 DBG(PFX "allocation for second device failed\n");
2334 else if ((err = register_netdev(dev1))) {
2335 DBG(PFX "register of second port failed (%d)\n", err);
2336 hw->dev[1] = NULL;
2337 netdev_nullify(dev1);
2338 netdev_put(dev1);
2339 } else
2340 sky2_show_addr(dev1);
2341 }
2343 pci_set_drvdata(pdev, hw);
2345 return 0;
2347 err_out_free_netdev:
2348 netdev_nullify(dev);
2349 netdev_put(dev);
2350 err_out_free_pci:
2351 sky2_write8(hw, B0_CTST, CS_RST_SET);
2352 free_dma(hw->st_le, STATUS_LE_BYTES);
2353 err_out_iounmap:
2354 iounmap((void *)hw->regs);
2355 err_out_free_hw:
2356 free(hw);
2357 err_out:
2358 pci_set_drvdata(pdev, NULL);
2359 return err;
2360 }
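/* Undo sky2_probe(): unregister both ports, quiesce the hardware, and
 * release every resource the probe routine allocated. */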
2362 static void sky2_remove(struct pci_device *pdev)
2363 {
2364 struct sky2_hw *hw = pci_get_drvdata(pdev);
2365 int i;
2367 if (!hw)
2368 return;
2370 for (i = hw->ports-1; i >= 0; --i)
2371 unregister_netdev(hw->dev[i]);
2373 sky2_write32(hw, B0_IMSK, 0);
2375 sky2_power_aux(hw);
2377 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
2378 sky2_write8(hw, B0_CTST, CS_RST_SET);
2379 sky2_read8(hw, B0_CTST);
2381 free_dma(hw->st_le, STATUS_LE_BYTES);
2383 for (i = hw->ports-1; i >= 0; --i) {
2384 netdev_nullify(hw->dev[i]);
2385 netdev_put(hw->dev[i]);
2386 }
2388 iounmap((void *)hw->regs);
2389 free(hw);
2391 pci_set_drvdata(pdev, NULL);
2392 }
2394 struct pci_driver sky2_driver __pci_driver = {
2395 .ids = sky2_id_table,
2396 .id_count = (sizeof (sky2_id_table) / sizeof (sky2_id_table[0])),
2397 .probe = sky2_probe,
2398 .remove = sky2_remove
2399 };