1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
88 #include <sys/param.h>
89 #include <sys/systm.h>
91 #include <sys/endian.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/module.h>
96 #include <sys/socket.h>
97 #include <sys/sockio.h>
98 #include <sys/queue.h>
99 #include <sys/sysctl.h>
102 #include <net/ethernet.h>
104 #include <net/if_arp.h>
105 #include <net/if_dl.h>
106 #include <net/if_media.h>
107 #include <net/if_types.h>
108 #include <net/if_vlan_var.h>
110 #include <netinet/in.h>
111 #include <netinet/in_systm.h>
112 #include <netinet/ip.h>
114 #include <machine/bus.h>
115 #include <machine/in_cksum.h>
116 #include <machine/resource.h>
117 #include <sys/rman.h>
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #include <dev/mii/brgphyreg.h>
123 #include <dev/pci/pcireg.h>
124 #include <dev/pci/pcivar.h>
127 #define SK_USEIOSPACE
130 #include <dev/sk/if_skreg.h>
131 #include <dev/sk/xmaciireg.h>
132 #include <dev/sk/yukonreg.h>
134 MODULE_DEPEND(sk
, pci
, 1, 1, 1);
135 MODULE_DEPEND(sk
, ether
, 1, 1, 1);
136 MODULE_DEPEND(sk
, miibus
, 1, 1, 1);
138 /* "device miibus" required. See GENERIC if you get errors here. */
139 #include "miibus_if.h"
141 static const struct sk_type sk_devs
[] = {
145 "SysKonnect Gigabit Ethernet (V1.0)"
150 "SysKonnect Gigabit Ethernet (V2.0)"
155 "Marvell Gigabit Ethernet"
159 DEVICEID_BELKIN_5005
,
160 "Belkin F5D5005 Gigabit Ethernet"
165 "3Com 3C940 Gigabit Ethernet"
169 DEVICEID_LINKSYS_EG1032
,
170 "Linksys EG1032 Gigabit Ethernet"
174 DEVICEID_DLINK_DGE530T_A1
,
175 "D-Link DGE-530T Gigabit Ethernet"
179 DEVICEID_DLINK_DGE530T_B1
,
180 "D-Link DGE-530T Gigabit Ethernet"
185 static int skc_probe(device_t
);
186 static int skc_attach(device_t
);
187 static int skc_detach(device_t
);
188 static int skc_shutdown(device_t
);
189 static int skc_suspend(device_t
);
190 static int skc_resume(device_t
);
191 static bus_dma_tag_t
skc_get_dma_tag(device_t
, device_t
);
192 static int sk_detach(device_t
);
193 static int sk_probe(device_t
);
194 static int sk_attach(device_t
);
195 static void sk_tick(void *);
196 static void sk_yukon_tick(void *);
197 static void sk_intr(void *);
198 static void sk_intr_xmac(struct sk_if_softc
*);
199 static void sk_intr_bcom(struct sk_if_softc
*);
200 static void sk_intr_yukon(struct sk_if_softc
*);
201 static __inline
void sk_rxcksum(struct ifnet
*, struct mbuf
*, u_int32_t
);
202 static __inline
int sk_rxvalid(struct sk_softc
*, u_int32_t
, u_int32_t
);
203 static void sk_rxeof(struct sk_if_softc
*);
204 static void sk_jumbo_rxeof(struct sk_if_softc
*);
205 static void sk_txeof(struct sk_if_softc
*);
206 static void sk_txcksum(struct ifnet
*, struct mbuf
*, struct sk_tx_desc
*);
207 static int sk_encap(struct sk_if_softc
*, struct mbuf
**);
208 static void sk_start(struct ifnet
*);
209 static void sk_start_locked(struct ifnet
*);
210 static int sk_ioctl(struct ifnet
*, u_long
, caddr_t
);
211 static void sk_init(void *);
212 static void sk_init_locked(struct sk_if_softc
*);
213 static void sk_init_xmac(struct sk_if_softc
*);
214 static void sk_init_yukon(struct sk_if_softc
*);
215 static void sk_stop(struct sk_if_softc
*);
216 static void sk_watchdog(void *);
217 static int sk_ifmedia_upd(struct ifnet
*);
218 static void sk_ifmedia_sts(struct ifnet
*, struct ifmediareq
*);
219 static void sk_reset(struct sk_softc
*);
220 static __inline
void sk_discard_rxbuf(struct sk_if_softc
*, int);
221 static __inline
void sk_discard_jumbo_rxbuf(struct sk_if_softc
*, int);
222 static int sk_newbuf(struct sk_if_softc
*, int);
223 static int sk_jumbo_newbuf(struct sk_if_softc
*, int);
224 static void sk_dmamap_cb(void *, bus_dma_segment_t
*, int, int);
225 static int sk_dma_alloc(struct sk_if_softc
*);
226 static int sk_dma_jumbo_alloc(struct sk_if_softc
*);
227 static void sk_dma_free(struct sk_if_softc
*);
228 static void sk_dma_jumbo_free(struct sk_if_softc
*);
229 static int sk_init_rx_ring(struct sk_if_softc
*);
230 static int sk_init_jumbo_rx_ring(struct sk_if_softc
*);
231 static void sk_init_tx_ring(struct sk_if_softc
*);
232 static u_int32_t
sk_win_read_4(struct sk_softc
*, int);
233 static u_int16_t
sk_win_read_2(struct sk_softc
*, int);
234 static u_int8_t
sk_win_read_1(struct sk_softc
*, int);
235 static void sk_win_write_4(struct sk_softc
*, int, u_int32_t
);
236 static void sk_win_write_2(struct sk_softc
*, int, u_int32_t
);
237 static void sk_win_write_1(struct sk_softc
*, int, u_int32_t
);
239 static int sk_miibus_readreg(device_t
, int, int);
240 static int sk_miibus_writereg(device_t
, int, int, int);
241 static void sk_miibus_statchg(device_t
);
243 static int sk_xmac_miibus_readreg(struct sk_if_softc
*, int, int);
244 static int sk_xmac_miibus_writereg(struct sk_if_softc
*, int, int,
246 static void sk_xmac_miibus_statchg(struct sk_if_softc
*);
248 static int sk_marv_miibus_readreg(struct sk_if_softc
*, int, int);
249 static int sk_marv_miibus_writereg(struct sk_if_softc
*, int, int,
251 static void sk_marv_miibus_statchg(struct sk_if_softc
*);
253 static uint32_t sk_xmchash(const uint8_t *);
254 static void sk_setfilt(struct sk_if_softc
*, u_int16_t
*, int);
255 static void sk_rxfilter(struct sk_if_softc
*);
256 static void sk_rxfilter_genesis(struct sk_if_softc
*);
257 static void sk_rxfilter_yukon(struct sk_if_softc
*);
259 static int sysctl_int_range(SYSCTL_HANDLER_ARGS
, int low
, int high
);
260 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS
);
263 static int jumbo_disable
= 0;
264 TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable
);
267 static u_short
in_addword(u_short a
, u_short b
);
271 * It seems that SK-NET GENESIS supports very simple checksum offload
272 * capability for Tx and I believe it can generate 0 checksum value for
273 * UDP packets in Tx as the hardware can't differenciate UDP packets from
274 * TCP packets. 0 chcecksum value for UDP packet is an invalid one as it
275 * means sender didn't perforam checksum computation. For the safety I
276 * disabled UDP checksum offload capability at the moment. Alternatively
277 * we can intrduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
280 #define SK_CSUM_FEATURES (CSUM_TCP)
283 * Note that we have newbus methods for both the GEnesis controller
284 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
285 * the miibus code is a child of the XMACs. We need to do it this way
286 * so that the miibus drivers can access the PHY registers on the
287 * right PHY. It's not quite what I had in mind, but it's the only
288 * design that achieves the desired effect.
290 static device_method_t skc_methods
[] = {
291 /* Device interface */
292 DEVMETHOD(device_probe
, skc_probe
),
293 DEVMETHOD(device_attach
, skc_attach
),
294 DEVMETHOD(device_detach
, skc_detach
),
295 DEVMETHOD(device_suspend
, skc_suspend
),
296 DEVMETHOD(device_resume
, skc_resume
),
297 DEVMETHOD(device_shutdown
, skc_shutdown
),
299 DEVMETHOD(bus_get_dma_tag
, skc_get_dma_tag
),
304 static driver_t skc_driver
= {
307 sizeof(struct sk_softc
)
310 static devclass_t skc_devclass
;
312 static device_method_t sk_methods
[] = {
313 /* Device interface */
314 DEVMETHOD(device_probe
, sk_probe
),
315 DEVMETHOD(device_attach
, sk_attach
),
316 DEVMETHOD(device_detach
, sk_detach
),
317 DEVMETHOD(device_shutdown
, bus_generic_shutdown
),
320 DEVMETHOD(miibus_readreg
, sk_miibus_readreg
),
321 DEVMETHOD(miibus_writereg
, sk_miibus_writereg
),
322 DEVMETHOD(miibus_statchg
, sk_miibus_statchg
),
327 static driver_t sk_driver
= {
330 sizeof(struct sk_if_softc
)
333 static devclass_t sk_devclass
;
/*
 * Driver hierarchy: skc (the GEnesis controller) attaches to pci;
 * one sk child per XMAC/Yukon port attaches to skc; miibus attaches
 * below each sk instance so PHY accesses go through the right MAC.
 */
DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, NULL, NULL);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, NULL, NULL);
339 static struct resource_spec sk_res_spec_io
[] = {
340 { SYS_RES_IOPORT
, PCIR_BAR(1), RF_ACTIVE
},
341 { SYS_RES_IRQ
, 0, RF_ACTIVE
| RF_SHAREABLE
},
345 static struct resource_spec sk_res_spec_mem
[] = {
346 { SYS_RES_MEMORY
, PCIR_BAR(0), RF_ACTIVE
},
347 { SYS_RES_IRQ
, 0, RF_ACTIVE
| RF_SHAREABLE
},
/*
 * Read-modify-write helpers for CSR registers and for registers
 * accessed through the register window.  The mask argument is
 * parenthesized in each expansion so that compound expressions such
 * as (A | B) bind correctly under '~' and '|' (the unparenthesized
 * form would expand ~x as ~A | B).
 */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | (x))

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~(x))

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | (x))

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~(x))
370 sk_win_read_4(sc
, reg
)
375 CSR_WRITE_4(sc
, SK_RAP
, SK_WIN(reg
));
376 return(CSR_READ_4(sc
, SK_WIN_BASE
+ SK_REG(reg
)));
378 return(CSR_READ_4(sc
, reg
));
383 sk_win_read_2(sc
, reg
)
388 CSR_WRITE_4(sc
, SK_RAP
, SK_WIN(reg
));
389 return(CSR_READ_2(sc
, SK_WIN_BASE
+ SK_REG(reg
)));
391 return(CSR_READ_2(sc
, reg
));
396 sk_win_read_1(sc
, reg
)
401 CSR_WRITE_4(sc
, SK_RAP
, SK_WIN(reg
));
402 return(CSR_READ_1(sc
, SK_WIN_BASE
+ SK_REG(reg
)));
404 return(CSR_READ_1(sc
, reg
));
409 sk_win_write_4(sc
, reg
, val
)
415 CSR_WRITE_4(sc
, SK_RAP
, SK_WIN(reg
));
416 CSR_WRITE_4(sc
, SK_WIN_BASE
+ SK_REG(reg
), val
);
418 CSR_WRITE_4(sc
, reg
, val
);
424 sk_win_write_2(sc
, reg
, val
)
430 CSR_WRITE_4(sc
, SK_RAP
, SK_WIN(reg
));
431 CSR_WRITE_2(sc
, SK_WIN_BASE
+ SK_REG(reg
), val
);
433 CSR_WRITE_2(sc
, reg
, val
);
439 sk_win_write_1(sc
, reg
, val
)
445 CSR_WRITE_4(sc
, SK_RAP
, SK_WIN(reg
));
446 CSR_WRITE_1(sc
, SK_WIN_BASE
+ SK_REG(reg
), val
);
448 CSR_WRITE_1(sc
, reg
, val
);
/* stole these from in_cksum.c */
/*
 * Fold a ones-complement carry back into the low 16 bits.  The
 * parameter is parenthesized in the expansion so that compound
 * arguments evaluate correctly (CERT PRE01-C); note the macro
 * intentionally modifies its lvalue argument in the carry case.
 */
#define ADDCARRY(x)	((x) > 65535 ? (x) -= 65535 : (x))
457 in_addword(u_short a
, u_short b
)
459 u_int64_t sum
= a
+ b
;
467 sk_miibus_readreg(dev
, phy
, reg
)
471 struct sk_if_softc
*sc_if
;
474 sc_if
= device_get_softc(dev
);
476 SK_IF_MII_LOCK(sc_if
);
477 switch(sc_if
->sk_softc
->sk_type
) {
479 v
= sk_xmac_miibus_readreg(sc_if
, phy
, reg
);
484 v
= sk_marv_miibus_readreg(sc_if
, phy
, reg
);
490 SK_IF_MII_UNLOCK(sc_if
);
496 sk_miibus_writereg(dev
, phy
, reg
, val
)
500 struct sk_if_softc
*sc_if
;
503 sc_if
= device_get_softc(dev
);
505 SK_IF_MII_LOCK(sc_if
);
506 switch(sc_if
->sk_softc
->sk_type
) {
508 v
= sk_xmac_miibus_writereg(sc_if
, phy
, reg
, val
);
513 v
= sk_marv_miibus_writereg(sc_if
, phy
, reg
, val
);
519 SK_IF_MII_UNLOCK(sc_if
);
525 sk_miibus_statchg(dev
)
528 struct sk_if_softc
*sc_if
;
530 sc_if
= device_get_softc(dev
);
532 SK_IF_MII_LOCK(sc_if
);
533 switch(sc_if
->sk_softc
->sk_type
) {
535 sk_xmac_miibus_statchg(sc_if
);
540 sk_marv_miibus_statchg(sc_if
);
543 SK_IF_MII_UNLOCK(sc_if
);
549 sk_xmac_miibus_readreg(sc_if
, phy
, reg
)
550 struct sk_if_softc
*sc_if
;
555 SK_XM_WRITE_2(sc_if
, XM_PHY_ADDR
, reg
|(phy
<< 8));
556 SK_XM_READ_2(sc_if
, XM_PHY_DATA
);
557 if (sc_if
->sk_phytype
!= SK_PHYTYPE_XMAC
) {
558 for (i
= 0; i
< SK_TIMEOUT
; i
++) {
560 if (SK_XM_READ_2(sc_if
, XM_MMUCMD
) &
561 XM_MMUCMD_PHYDATARDY
)
565 if (i
== SK_TIMEOUT
) {
566 if_printf(sc_if
->sk_ifp
, "phy failed to come ready\n");
571 i
= SK_XM_READ_2(sc_if
, XM_PHY_DATA
);
577 sk_xmac_miibus_writereg(sc_if
, phy
, reg
, val
)
578 struct sk_if_softc
*sc_if
;
583 SK_XM_WRITE_2(sc_if
, XM_PHY_ADDR
, reg
|(phy
<< 8));
584 for (i
= 0; i
< SK_TIMEOUT
; i
++) {
585 if (!(SK_XM_READ_2(sc_if
, XM_MMUCMD
) & XM_MMUCMD_PHYBUSY
))
589 if (i
== SK_TIMEOUT
) {
590 if_printf(sc_if
->sk_ifp
, "phy failed to come ready\n");
594 SK_XM_WRITE_2(sc_if
, XM_PHY_DATA
, val
);
595 for (i
= 0; i
< SK_TIMEOUT
; i
++) {
597 if (!(SK_XM_READ_2(sc_if
, XM_MMUCMD
) & XM_MMUCMD_PHYBUSY
))
601 if_printf(sc_if
->sk_ifp
, "phy write timed out\n");
607 sk_xmac_miibus_statchg(sc_if
)
608 struct sk_if_softc
*sc_if
;
610 struct mii_data
*mii
;
612 mii
= device_get_softc(sc_if
->sk_miibus
);
615 * If this is a GMII PHY, manually set the XMAC's
616 * duplex mode accordingly.
618 if (sc_if
->sk_phytype
!= SK_PHYTYPE_XMAC
) {
619 if ((mii
->mii_media_active
& IFM_GMASK
) == IFM_FDX
) {
620 SK_XM_SETBIT_2(sc_if
, XM_MMUCMD
, XM_MMUCMD_GMIIFDX
);
622 SK_XM_CLRBIT_2(sc_if
, XM_MMUCMD
, XM_MMUCMD_GMIIFDX
);
628 sk_marv_miibus_readreg(sc_if
, phy
, reg
)
629 struct sk_if_softc
*sc_if
;
635 if (sc_if
->sk_phytype
!= SK_PHYTYPE_MARV_COPPER
&&
636 sc_if
->sk_phytype
!= SK_PHYTYPE_MARV_FIBER
) {
640 SK_YU_WRITE_2(sc_if
, YUKON_SMICR
, YU_SMICR_PHYAD(phy
) |
641 YU_SMICR_REGAD(reg
) | YU_SMICR_OP_READ
);
643 for (i
= 0; i
< SK_TIMEOUT
; i
++) {
645 val
= SK_YU_READ_2(sc_if
, YUKON_SMICR
);
646 if (val
& YU_SMICR_READ_VALID
)
650 if (i
== SK_TIMEOUT
) {
651 if_printf(sc_if
->sk_ifp
, "phy failed to come ready\n");
655 val
= SK_YU_READ_2(sc_if
, YUKON_SMIDR
);
661 sk_marv_miibus_writereg(sc_if
, phy
, reg
, val
)
662 struct sk_if_softc
*sc_if
;
667 SK_YU_WRITE_2(sc_if
, YUKON_SMIDR
, val
);
668 SK_YU_WRITE_2(sc_if
, YUKON_SMICR
, YU_SMICR_PHYAD(phy
) |
669 YU_SMICR_REGAD(reg
) | YU_SMICR_OP_WRITE
);
671 for (i
= 0; i
< SK_TIMEOUT
; i
++) {
673 if ((SK_YU_READ_2(sc_if
, YUKON_SMICR
) & YU_SMICR_BUSY
) == 0)
677 if_printf(sc_if
->sk_ifp
, "phy write timeout\n");
683 sk_marv_miibus_statchg(sc_if
)
684 struct sk_if_softc
*sc_if
;
697 /* Compute CRC for the address value. */
698 crc
= ether_crc32_le(addr
, ETHER_ADDR_LEN
);
700 return (~crc
& ((1 << HASH_BITS
) - 1));
704 sk_setfilt(sc_if
, addr
, slot
)
705 struct sk_if_softc
*sc_if
;
711 base
= XM_RXFILT_ENTRY(slot
);
713 SK_XM_WRITE_2(sc_if
, base
, addr
[0]);
714 SK_XM_WRITE_2(sc_if
, base
+ 2, addr
[1]);
715 SK_XM_WRITE_2(sc_if
, base
+ 4, addr
[2]);
722 struct sk_if_softc
*sc_if
;
726 SK_IF_LOCK_ASSERT(sc_if
);
728 sc
= sc_if
->sk_softc
;
729 if (sc
->sk_type
== SK_GENESIS
)
730 sk_rxfilter_genesis(sc_if
);
732 sk_rxfilter_yukon(sc_if
);
736 sk_rxfilter_genesis(sc_if
)
737 struct sk_if_softc
*sc_if
;
739 struct ifnet
*ifp
= sc_if
->sk_ifp
;
740 u_int32_t hashes
[2] = { 0, 0 }, mode
;
742 struct ifmultiaddr
*ifma
;
743 u_int16_t dummy
[] = { 0, 0, 0 };
744 u_int16_t maddr
[(ETHER_ADDR_LEN
+1)/2];
746 SK_IF_LOCK_ASSERT(sc_if
);
748 mode
= SK_XM_READ_4(sc_if
, XM_MODE
);
749 mode
&= ~(XM_MODE_RX_PROMISC
| XM_MODE_RX_USE_HASH
|
750 XM_MODE_RX_USE_PERFECT
);
751 /* First, zot all the existing perfect filters. */
752 for (i
= 1; i
< XM_RXFILT_MAX
; i
++)
753 sk_setfilt(sc_if
, dummy
, i
);
755 /* Now program new ones. */
756 if (ifp
->if_flags
& IFF_ALLMULTI
|| ifp
->if_flags
& IFF_PROMISC
) {
757 if (ifp
->if_flags
& IFF_ALLMULTI
)
758 mode
|= XM_MODE_RX_USE_HASH
;
759 if (ifp
->if_flags
& IFF_PROMISC
)
760 mode
|= XM_MODE_RX_PROMISC
;
761 hashes
[0] = 0xFFFFFFFF;
762 hashes
[1] = 0xFFFFFFFF;
766 TAILQ_FOREACH_REVERSE(ifma
, &ifp
->if_multiaddrs
, ifmultihead
,
768 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
771 * Program the first XM_RXFILT_MAX multicast groups
772 * into the perfect filter.
774 bcopy(LLADDR((struct sockaddr_dl
*)ifma
->ifma_addr
),
775 maddr
, ETHER_ADDR_LEN
);
776 if (i
< XM_RXFILT_MAX
) {
777 sk_setfilt(sc_if
, maddr
, i
);
778 mode
|= XM_MODE_RX_USE_PERFECT
;
782 h
= sk_xmchash((const uint8_t *)maddr
);
784 hashes
[0] |= (1 << h
);
786 hashes
[1] |= (1 << (h
- 32));
787 mode
|= XM_MODE_RX_USE_HASH
;
789 if_maddr_runlock(ifp
);
792 SK_XM_WRITE_4(sc_if
, XM_MODE
, mode
);
793 SK_XM_WRITE_4(sc_if
, XM_MAR0
, hashes
[0]);
794 SK_XM_WRITE_4(sc_if
, XM_MAR2
, hashes
[1]);
798 sk_rxfilter_yukon(sc_if
)
799 struct sk_if_softc
*sc_if
;
802 u_int32_t crc
, hashes
[2] = { 0, 0 }, mode
;
803 struct ifmultiaddr
*ifma
;
805 SK_IF_LOCK_ASSERT(sc_if
);
808 mode
= SK_YU_READ_2(sc_if
, YUKON_RCR
);
809 if (ifp
->if_flags
& IFF_PROMISC
)
810 mode
&= ~(YU_RCR_UFLEN
| YU_RCR_MUFLEN
);
811 else if (ifp
->if_flags
& IFF_ALLMULTI
) {
812 mode
|= YU_RCR_UFLEN
| YU_RCR_MUFLEN
;
813 hashes
[0] = 0xFFFFFFFF;
814 hashes
[1] = 0xFFFFFFFF;
816 mode
|= YU_RCR_UFLEN
;
818 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
819 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
821 crc
= ether_crc32_be(LLADDR((struct sockaddr_dl
*)
822 ifma
->ifma_addr
), ETHER_ADDR_LEN
);
823 /* Just want the 6 least significant bits. */
825 /* Set the corresponding bit in the hash table. */
826 hashes
[crc
>> 5] |= 1 << (crc
& 0x1f);
828 if_maddr_runlock(ifp
);
829 if (hashes
[0] != 0 || hashes
[1] != 0)
830 mode
|= YU_RCR_MUFLEN
;
833 SK_YU_WRITE_2(sc_if
, YUKON_MCAH1
, hashes
[0] & 0xffff);
834 SK_YU_WRITE_2(sc_if
, YUKON_MCAH2
, (hashes
[0] >> 16) & 0xffff);
835 SK_YU_WRITE_2(sc_if
, YUKON_MCAH3
, hashes
[1] & 0xffff);
836 SK_YU_WRITE_2(sc_if
, YUKON_MCAH4
, (hashes
[1] >> 16) & 0xffff);
837 SK_YU_WRITE_2(sc_if
, YUKON_RCR
, mode
);
841 sk_init_rx_ring(sc_if
)
842 struct sk_if_softc
*sc_if
;
844 struct sk_ring_data
*rd
;
846 u_int32_t csum_start
;
849 sc_if
->sk_cdata
.sk_rx_cons
= 0;
851 csum_start
= (ETHER_HDR_LEN
+ sizeof(struct ip
)) << 16 |
853 rd
= &sc_if
->sk_rdata
;
854 bzero(rd
->sk_rx_ring
, sizeof(struct sk_rx_desc
) * SK_RX_RING_CNT
);
855 for (i
= 0; i
< SK_RX_RING_CNT
; i
++) {
856 if (sk_newbuf(sc_if
, i
) != 0)
858 if (i
== (SK_RX_RING_CNT
- 1))
859 addr
= SK_RX_RING_ADDR(sc_if
, 0);
861 addr
= SK_RX_RING_ADDR(sc_if
, i
+ 1);
862 rd
->sk_rx_ring
[i
].sk_next
= htole32(SK_ADDR_LO(addr
));
863 rd
->sk_rx_ring
[i
].sk_csum_start
= htole32(csum_start
);
866 bus_dmamap_sync(sc_if
->sk_cdata
.sk_rx_ring_tag
,
867 sc_if
->sk_cdata
.sk_rx_ring_map
,
868 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
874 sk_init_jumbo_rx_ring(sc_if
)
875 struct sk_if_softc
*sc_if
;
877 struct sk_ring_data
*rd
;
879 u_int32_t csum_start
;
882 sc_if
->sk_cdata
.sk_jumbo_rx_cons
= 0;
884 csum_start
= ((ETHER_HDR_LEN
+ sizeof(struct ip
)) << 16) |
886 rd
= &sc_if
->sk_rdata
;
887 bzero(rd
->sk_jumbo_rx_ring
,
888 sizeof(struct sk_rx_desc
) * SK_JUMBO_RX_RING_CNT
);
889 for (i
= 0; i
< SK_JUMBO_RX_RING_CNT
; i
++) {
890 if (sk_jumbo_newbuf(sc_if
, i
) != 0)
892 if (i
== (SK_JUMBO_RX_RING_CNT
- 1))
893 addr
= SK_JUMBO_RX_RING_ADDR(sc_if
, 0);
895 addr
= SK_JUMBO_RX_RING_ADDR(sc_if
, i
+ 1);
896 rd
->sk_jumbo_rx_ring
[i
].sk_next
= htole32(SK_ADDR_LO(addr
));
897 rd
->sk_jumbo_rx_ring
[i
].sk_csum_start
= htole32(csum_start
);
900 bus_dmamap_sync(sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
,
901 sc_if
->sk_cdata
.sk_jumbo_rx_ring_map
,
902 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
908 sk_init_tx_ring(sc_if
)
909 struct sk_if_softc
*sc_if
;
911 struct sk_ring_data
*rd
;
912 struct sk_txdesc
*txd
;
916 STAILQ_INIT(&sc_if
->sk_cdata
.sk_txfreeq
);
917 STAILQ_INIT(&sc_if
->sk_cdata
.sk_txbusyq
);
919 sc_if
->sk_cdata
.sk_tx_prod
= 0;
920 sc_if
->sk_cdata
.sk_tx_cons
= 0;
921 sc_if
->sk_cdata
.sk_tx_cnt
= 0;
923 rd
= &sc_if
->sk_rdata
;
924 bzero(rd
->sk_tx_ring
, sizeof(struct sk_tx_desc
) * SK_TX_RING_CNT
);
925 for (i
= 0; i
< SK_TX_RING_CNT
; i
++) {
926 if (i
== (SK_TX_RING_CNT
- 1))
927 addr
= SK_TX_RING_ADDR(sc_if
, 0);
929 addr
= SK_TX_RING_ADDR(sc_if
, i
+ 1);
930 rd
->sk_tx_ring
[i
].sk_next
= htole32(SK_ADDR_LO(addr
));
931 txd
= &sc_if
->sk_cdata
.sk_txdesc
[i
];
932 STAILQ_INSERT_TAIL(&sc_if
->sk_cdata
.sk_txfreeq
, txd
, tx_q
);
935 bus_dmamap_sync(sc_if
->sk_cdata
.sk_tx_ring_tag
,
936 sc_if
->sk_cdata
.sk_tx_ring_map
,
937 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
941 sk_discard_rxbuf(sc_if
, idx
)
942 struct sk_if_softc
*sc_if
;
945 struct sk_rx_desc
*r
;
946 struct sk_rxdesc
*rxd
;
950 r
= &sc_if
->sk_rdata
.sk_rx_ring
[idx
];
951 rxd
= &sc_if
->sk_cdata
.sk_rxdesc
[idx
];
953 r
->sk_ctl
= htole32(m
->m_len
| SK_RXSTAT
| SK_OPCODE_CSUM
);
957 sk_discard_jumbo_rxbuf(sc_if
, idx
)
958 struct sk_if_softc
*sc_if
;
961 struct sk_rx_desc
*r
;
962 struct sk_rxdesc
*rxd
;
965 r
= &sc_if
->sk_rdata
.sk_jumbo_rx_ring
[idx
];
966 rxd
= &sc_if
->sk_cdata
.sk_jumbo_rxdesc
[idx
];
968 r
->sk_ctl
= htole32(m
->m_len
| SK_RXSTAT
| SK_OPCODE_CSUM
);
972 sk_newbuf(sc_if
, idx
)
973 struct sk_if_softc
*sc_if
;
976 struct sk_rx_desc
*r
;
977 struct sk_rxdesc
*rxd
;
979 bus_dma_segment_t segs
[1];
983 m
= m_getcl(M_NOWAIT
, MT_DATA
, M_PKTHDR
);
986 m
->m_len
= m
->m_pkthdr
.len
= MCLBYTES
;
987 m_adj(m
, ETHER_ALIGN
);
989 if (bus_dmamap_load_mbuf_sg(sc_if
->sk_cdata
.sk_rx_tag
,
990 sc_if
->sk_cdata
.sk_rx_sparemap
, m
, segs
, &nsegs
, 0) != 0) {
994 KASSERT(nsegs
== 1, ("%s: %d segments returned!", __func__
, nsegs
));
996 rxd
= &sc_if
->sk_cdata
.sk_rxdesc
[idx
];
997 if (rxd
->rx_m
!= NULL
) {
998 bus_dmamap_sync(sc_if
->sk_cdata
.sk_rx_tag
, rxd
->rx_dmamap
,
999 BUS_DMASYNC_POSTREAD
);
1000 bus_dmamap_unload(sc_if
->sk_cdata
.sk_rx_tag
, rxd
->rx_dmamap
);
1002 map
= rxd
->rx_dmamap
;
1003 rxd
->rx_dmamap
= sc_if
->sk_cdata
.sk_rx_sparemap
;
1004 sc_if
->sk_cdata
.sk_rx_sparemap
= map
;
1005 bus_dmamap_sync(sc_if
->sk_cdata
.sk_rx_tag
, rxd
->rx_dmamap
,
1006 BUS_DMASYNC_PREREAD
);
1008 r
= &sc_if
->sk_rdata
.sk_rx_ring
[idx
];
1009 r
->sk_data_lo
= htole32(SK_ADDR_LO(segs
[0].ds_addr
));
1010 r
->sk_data_hi
= htole32(SK_ADDR_HI(segs
[0].ds_addr
));
1011 r
->sk_ctl
= htole32(segs
[0].ds_len
| SK_RXSTAT
| SK_OPCODE_CSUM
);
1017 sk_jumbo_newbuf(sc_if
, idx
)
1018 struct sk_if_softc
*sc_if
;
1021 struct sk_rx_desc
*r
;
1022 struct sk_rxdesc
*rxd
;
1024 bus_dma_segment_t segs
[1];
1028 m
= m_getjcl(M_NOWAIT
, MT_DATA
, M_PKTHDR
, MJUM9BYTES
);
1031 if ((m
->m_flags
& M_EXT
) == 0) {
1035 m
->m_pkthdr
.len
= m
->m_len
= MJUM9BYTES
;
1037 * Adjust alignment so packet payload begins on a
1038 * longword boundary. Mandatory for Alpha, useful on
1041 m_adj(m
, ETHER_ALIGN
);
1043 if (bus_dmamap_load_mbuf_sg(sc_if
->sk_cdata
.sk_jumbo_rx_tag
,
1044 sc_if
->sk_cdata
.sk_jumbo_rx_sparemap
, m
, segs
, &nsegs
, 0) != 0) {
1048 KASSERT(nsegs
== 1, ("%s: %d segments returned!", __func__
, nsegs
));
1050 rxd
= &sc_if
->sk_cdata
.sk_jumbo_rxdesc
[idx
];
1051 if (rxd
->rx_m
!= NULL
) {
1052 bus_dmamap_sync(sc_if
->sk_cdata
.sk_jumbo_rx_tag
, rxd
->rx_dmamap
,
1053 BUS_DMASYNC_POSTREAD
);
1054 bus_dmamap_unload(sc_if
->sk_cdata
.sk_jumbo_rx_tag
,
1057 map
= rxd
->rx_dmamap
;
1058 rxd
->rx_dmamap
= sc_if
->sk_cdata
.sk_jumbo_rx_sparemap
;
1059 sc_if
->sk_cdata
.sk_jumbo_rx_sparemap
= map
;
1060 bus_dmamap_sync(sc_if
->sk_cdata
.sk_jumbo_rx_tag
, rxd
->rx_dmamap
,
1061 BUS_DMASYNC_PREREAD
);
1063 r
= &sc_if
->sk_rdata
.sk_jumbo_rx_ring
[idx
];
1064 r
->sk_data_lo
= htole32(SK_ADDR_LO(segs
[0].ds_addr
));
1065 r
->sk_data_hi
= htole32(SK_ADDR_HI(segs
[0].ds_addr
));
1066 r
->sk_ctl
= htole32(segs
[0].ds_len
| SK_RXSTAT
| SK_OPCODE_CSUM
);
1072 * Set media options.
1078 struct sk_if_softc
*sc_if
= ifp
->if_softc
;
1079 struct mii_data
*mii
;
1081 mii
= device_get_softc(sc_if
->sk_miibus
);
1089 * Report current media status.
1092 sk_ifmedia_sts(ifp
, ifmr
)
1094 struct ifmediareq
*ifmr
;
1096 struct sk_if_softc
*sc_if
;
1097 struct mii_data
*mii
;
1099 sc_if
= ifp
->if_softc
;
1100 mii
= device_get_softc(sc_if
->sk_miibus
);
1103 ifmr
->ifm_active
= mii
->mii_media_active
;
1104 ifmr
->ifm_status
= mii
->mii_media_status
;
1110 sk_ioctl(ifp
, command
, data
)
1115 struct sk_if_softc
*sc_if
= ifp
->if_softc
;
1116 struct ifreq
*ifr
= (struct ifreq
*) data
;
1118 struct mii_data
*mii
;
1123 if (ifr
->ifr_mtu
< ETHERMIN
|| ifr
->ifr_mtu
> SK_JUMBO_MTU
)
1125 else if (ifp
->if_mtu
!= ifr
->ifr_mtu
) {
1126 if (sc_if
->sk_jumbo_disable
!= 0 &&
1127 ifr
->ifr_mtu
> SK_MAX_FRAMELEN
)
1131 ifp
->if_mtu
= ifr
->ifr_mtu
;
1132 if (ifp
->if_drv_flags
& IFF_DRV_RUNNING
) {
1133 ifp
->if_drv_flags
&= ~IFF_DRV_RUNNING
;
1134 sk_init_locked(sc_if
);
1136 SK_IF_UNLOCK(sc_if
);
1142 if (ifp
->if_flags
& IFF_UP
) {
1143 if (ifp
->if_drv_flags
& IFF_DRV_RUNNING
) {
1144 if ((ifp
->if_flags
^ sc_if
->sk_if_flags
)
1145 & (IFF_PROMISC
| IFF_ALLMULTI
))
1148 sk_init_locked(sc_if
);
1150 if (ifp
->if_drv_flags
& IFF_DRV_RUNNING
)
1153 sc_if
->sk_if_flags
= ifp
->if_flags
;
1154 SK_IF_UNLOCK(sc_if
);
1159 if (ifp
->if_drv_flags
& IFF_DRV_RUNNING
)
1161 SK_IF_UNLOCK(sc_if
);
1165 mii
= device_get_softc(sc_if
->sk_miibus
);
1166 error
= ifmedia_ioctl(ifp
, ifr
, &mii
->mii_media
, command
);
1170 if (sc_if
->sk_softc
->sk_type
== SK_GENESIS
) {
1171 SK_IF_UNLOCK(sc_if
);
1174 mask
= ifr
->ifr_reqcap
^ ifp
->if_capenable
;
1175 if ((mask
& IFCAP_TXCSUM
) != 0 &&
1176 (IFCAP_TXCSUM
& ifp
->if_capabilities
) != 0) {
1177 ifp
->if_capenable
^= IFCAP_TXCSUM
;
1178 if ((ifp
->if_capenable
& IFCAP_TXCSUM
) != 0)
1179 ifp
->if_hwassist
|= SK_CSUM_FEATURES
;
1181 ifp
->if_hwassist
&= ~SK_CSUM_FEATURES
;
1183 if ((mask
& IFCAP_RXCSUM
) != 0 &&
1184 (IFCAP_RXCSUM
& ifp
->if_capabilities
) != 0)
1185 ifp
->if_capenable
^= IFCAP_RXCSUM
;
1186 SK_IF_UNLOCK(sc_if
);
1189 error
= ether_ioctl(ifp
, command
, data
);
1197 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1198 * IDs against our list and return a device name if we find a match.
1204 const struct sk_type
*t
= sk_devs
;
1206 while(t
->sk_name
!= NULL
) {
1207 if ((pci_get_vendor(dev
) == t
->sk_vid
) &&
1208 (pci_get_device(dev
) == t
->sk_did
)) {
1210 * Only attach to rev. 2 of the Linksys EG1032 adapter.
1211 * Rev. 3 is supported by re(4).
1213 if ((t
->sk_vid
== VENDORID_LINKSYS
) &&
1214 (t
->sk_did
== DEVICEID_LINKSYS_EG1032
) &&
1215 (pci_get_subdevice(dev
) !=
1216 SUBDEVICEID_LINKSYS_EG1032_REV2
)) {
1220 device_set_desc(dev
, t
->sk_name
);
1221 return (BUS_PROBE_DEFAULT
);
1230 * Force the GEnesis into reset, then bring it out of reset.
1234 struct sk_softc
*sc
;
1237 CSR_WRITE_2(sc
, SK_CSR
, SK_CSR_SW_RESET
);
1238 CSR_WRITE_2(sc
, SK_CSR
, SK_CSR_MASTER_RESET
);
1239 if (SK_YUKON_FAMILY(sc
->sk_type
))
1240 CSR_WRITE_2(sc
, SK_LINK_CTRL
, SK_LINK_RESET_SET
);
1243 CSR_WRITE_2(sc
, SK_CSR
, SK_CSR_SW_UNRESET
);
1245 CSR_WRITE_2(sc
, SK_CSR
, SK_CSR_MASTER_UNRESET
);
1246 if (SK_YUKON_FAMILY(sc
->sk_type
))
1247 CSR_WRITE_2(sc
, SK_LINK_CTRL
, SK_LINK_RESET_CLEAR
);
1249 if (sc
->sk_type
== SK_GENESIS
) {
1250 /* Configure packet arbiter */
1251 sk_win_write_2(sc
, SK_PKTARB_CTL
, SK_PKTARBCTL_UNRESET
);
1252 sk_win_write_2(sc
, SK_RXPA1_TINIT
, SK_PKTARB_TIMEOUT
);
1253 sk_win_write_2(sc
, SK_TXPA1_TINIT
, SK_PKTARB_TIMEOUT
);
1254 sk_win_write_2(sc
, SK_RXPA2_TINIT
, SK_PKTARB_TIMEOUT
);
1255 sk_win_write_2(sc
, SK_TXPA2_TINIT
, SK_PKTARB_TIMEOUT
);
1258 /* Enable RAM interface */
1259 sk_win_write_4(sc
, SK_RAMCTL
, SK_RAMCTL_UNRESET
);
1262 * Configure interrupt moderation. The moderation timer
1263 * defers interrupts specified in the interrupt moderation
1264 * timer mask based on the timeout specified in the interrupt
1265 * moderation timer init register. Each bit in the timer
1266 * register represents one tick, so to specify a timeout in
1267 * microseconds, we have to multiply by the correct number of
1268 * ticks-per-microsecond.
1270 switch (sc
->sk_type
) {
1272 sc
->sk_int_ticks
= SK_IMTIMER_TICKS_GENESIS
;
1275 sc
->sk_int_ticks
= SK_IMTIMER_TICKS_YUKON
;
1279 device_printf(sc
->sk_dev
, "interrupt moderation is %d us\n",
1281 sk_win_write_4(sc
, SK_IMTIMERINIT
, SK_IM_USECS(sc
->sk_int_mod
,
1283 sk_win_write_4(sc
, SK_IMMR
, SK_ISR_TX1_S_EOF
|SK_ISR_TX2_S_EOF
|
1284 SK_ISR_RX1_EOF
|SK_ISR_RX2_EOF
);
1285 sk_win_write_1(sc
, SK_IMTIMERCTL
, SK_IMCTL_START
);
1294 struct sk_softc
*sc
;
1296 sc
= device_get_softc(device_get_parent(dev
));
1299 * Not much to do here. We always know there will be
1300 * at least one XMAC present, and if there are two,
1301 * skc_attach() will create a second device instance
1304 switch (sc
->sk_type
) {
1306 device_set_desc(dev
, "XaQti Corp. XMAC II");
1311 device_set_desc(dev
, "Marvell Semiconductor, Inc. Yukon");
1315 return (BUS_PROBE_DEFAULT
);
1319 * Each XMAC chip is attached as a separate logical IP interface.
1320 * Single port cards will have only one logical interface of course.
1326 struct sk_softc
*sc
;
1327 struct sk_if_softc
*sc_if
;
1330 int error
, i
, phy
, port
;
1332 u_char inv_mac
[] = {0, 0, 0, 0, 0, 0};
1338 sc_if
= device_get_softc(dev
);
1339 sc
= device_get_softc(device_get_parent(dev
));
1340 port
= *(int *)device_get_ivars(dev
);
1342 sc_if
->sk_if_dev
= dev
;
1343 sc_if
->sk_port
= port
;
1344 sc_if
->sk_softc
= sc
;
1345 sc
->sk_if
[port
] = sc_if
;
1346 if (port
== SK_PORT_A
)
1347 sc_if
->sk_tx_bmu
= SK_BMU_TXS_CSR0
;
1348 if (port
== SK_PORT_B
)
1349 sc_if
->sk_tx_bmu
= SK_BMU_TXS_CSR1
;
1351 callout_init_mtx(&sc_if
->sk_tick_ch
, &sc_if
->sk_softc
->sk_mtx
, 0);
1352 callout_init_mtx(&sc_if
->sk_watchdog_ch
, &sc_if
->sk_softc
->sk_mtx
, 0);
1354 if (sk_dma_alloc(sc_if
) != 0) {
1358 sk_dma_jumbo_alloc(sc_if
);
1360 ifp
= sc_if
->sk_ifp
= if_alloc(IFT_ETHER
);
1362 device_printf(sc_if
->sk_if_dev
, "can not if_alloc()\n");
1366 ifp
->if_softc
= sc_if
;
1367 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
1368 ifp
->if_mtu
= ETHERMTU
;
1369 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
1371 * SK_GENESIS has a bug in checksum offload - From linux.
1373 if (sc_if
->sk_softc
->sk_type
!= SK_GENESIS
) {
1374 ifp
->if_capabilities
= IFCAP_TXCSUM
| IFCAP_RXCSUM
;
1375 ifp
->if_hwassist
= 0;
1377 ifp
->if_capabilities
= 0;
1378 ifp
->if_hwassist
= 0;
1380 ifp
->if_capenable
= ifp
->if_capabilities
;
1382 * Some revision of Yukon controller generates corrupted
1383 * frame when TX checksum offloading is enabled. The
1384 * frame has a valid checksum value so payload might be
1385 * modified during TX checksum calculation. Disable TX
1386 * checksum offloading but give users chance to enable it
1387 * when they know their controller works without problems
1388 * with TX checksum offloading.
1390 ifp
->if_capenable
&= ~IFCAP_TXCSUM
;
1391 ifp
->if_ioctl
= sk_ioctl
;
1392 ifp
->if_start
= sk_start
;
1393 ifp
->if_init
= sk_init
;
1394 IFQ_SET_MAXLEN(&ifp
->if_snd
, SK_TX_RING_CNT
- 1);
1395 ifp
->if_snd
.ifq_drv_maxlen
= SK_TX_RING_CNT
- 1;
1396 IFQ_SET_READY(&ifp
->if_snd
);
1399 * Get station address for this interface. Note that
1400 * dual port cards actually come with three station
1401 * addresses: one for each port, plus an extra. The
1402 * extra one is used by the SysKonnect driver software
1403 * as a 'virtual' station address for when both ports
1404 * are operating in failover mode. Currently we don't
1405 * use this extra address.
1408 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++)
1410 sk_win_read_1(sc
, SK_MAC0_0
+ (port
* 8) + i
);
1412 /* Verify whether the station address is invalid or not. */
1413 if (bcmp(eaddr
, inv_mac
, sizeof(inv_mac
)) == 0) {
1414 device_printf(sc_if
->sk_if_dev
,
1415 "Generating random ethernet address\n");
1418 * Set OUI to convenient locally assigned address. 'b'
1419 * is 0x62, which has the locally assigned bit set, and
1420 * the broadcast/multicast bit clear.
1425 eaddr
[3] = (r
>> 16) & 0xff;
1426 eaddr
[4] = (r
>> 8) & 0xff;
1427 eaddr
[5] = (r
>> 0) & 0xff;
1430 * Set up RAM buffer addresses. The NIC will have a certain
1431 * amount of SRAM on it, somewhere between 512K and 2MB. We
1432 * need to divide this up a) between the transmitter and
1433 * receiver and b) between the two XMACs, if this is a
1434 * dual port NIC. Our algotithm is to divide up the memory
1435 * evenly so that everyone gets a fair share.
1437 * Just to be contrary, Yukon2 appears to have separate memory
1440 if (sk_win_read_1(sc
, SK_CONFIG
) & SK_CONFIG_SINGLEMAC
) {
1441 u_int32_t chunk
, val
;
1443 chunk
= sc
->sk_ramsize
/ 2;
1444 val
= sc
->sk_rboff
/ sizeof(u_int64_t
);
1445 sc_if
->sk_rx_ramstart
= val
;
1446 val
+= (chunk
/ sizeof(u_int64_t
));
1447 sc_if
->sk_rx_ramend
= val
- 1;
1448 sc_if
->sk_tx_ramstart
= val
;
1449 val
+= (chunk
/ sizeof(u_int64_t
));
1450 sc_if
->sk_tx_ramend
= val
- 1;
1452 u_int32_t chunk
, val
;
1454 chunk
= sc
->sk_ramsize
/ 4;
1455 val
= (sc
->sk_rboff
+ (chunk
* 2 * sc_if
->sk_port
)) /
1457 sc_if
->sk_rx_ramstart
= val
;
1458 val
+= (chunk
/ sizeof(u_int64_t
));
1459 sc_if
->sk_rx_ramend
= val
- 1;
1460 sc_if
->sk_tx_ramstart
= val
;
1461 val
+= (chunk
/ sizeof(u_int64_t
));
1462 sc_if
->sk_tx_ramend
= val
- 1;
1465 /* Read and save PHY type and set PHY address */
1466 sc_if
->sk_phytype
= sk_win_read_1(sc
, SK_EPROM1
) & 0xF;
1467 if (!SK_YUKON_FAMILY(sc
->sk_type
)) {
1468 switch(sc_if
->sk_phytype
) {
1469 case SK_PHYTYPE_XMAC
:
1470 sc_if
->sk_phyaddr
= SK_PHYADDR_XMAC
;
1472 case SK_PHYTYPE_BCOM
:
1473 sc_if
->sk_phyaddr
= SK_PHYADDR_BCOM
;
1476 device_printf(sc
->sk_dev
, "unsupported PHY type: %d\n",
1479 SK_IF_UNLOCK(sc_if
);
1483 if (sc_if
->sk_phytype
< SK_PHYTYPE_MARV_COPPER
&&
1484 sc
->sk_pmd
!= 'S') {
1485 /* not initialized, punt */
1486 sc_if
->sk_phytype
= SK_PHYTYPE_MARV_COPPER
;
1487 sc
->sk_coppertype
= 1;
1490 sc_if
->sk_phyaddr
= SK_PHYADDR_MARV
;
1492 if (!(sc
->sk_coppertype
))
1493 sc_if
->sk_phytype
= SK_PHYTYPE_MARV_FIBER
;
1497 * Call MI attach routine. Can't hold locks when calling into ether_*.
1499 SK_IF_UNLOCK(sc_if
);
1500 ether_ifattach(ifp
, eaddr
);
1504 * The hardware should be ready for VLAN_MTU by default:
1505 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1506 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1509 ifp
->if_capabilities
|= IFCAP_VLAN_MTU
;
1510 ifp
->if_capenable
|= IFCAP_VLAN_MTU
;
1512 * Tell the upper layer(s) we support long frames.
1513 * Must appear after the call to ether_ifattach() because
1514 * ether_ifattach() sets ifi_hdrlen to the default value.
1516 ifp
->if_data
.ifi_hdrlen
= sizeof(struct ether_vlan_header
);
1522 switch (sc
->sk_type
) {
1524 sk_init_xmac(sc_if
);
1525 if (sc_if
->sk_phytype
== SK_PHYTYPE_XMAC
)
1531 sk_init_yukon(sc_if
);
1536 SK_IF_UNLOCK(sc_if
);
1537 error
= mii_attach(dev
, &sc_if
->sk_miibus
, ifp
, sk_ifmedia_upd
,
1538 sk_ifmedia_sts
, BMSR_DEFCAPMASK
, phy
, MII_OFFSET_ANY
, 0);
1540 device_printf(sc_if
->sk_if_dev
, "attaching PHYs failed\n");
1541 ether_ifdetach(ifp
);
1547 /* Access should be ok even though lock has been dropped */
1548 sc
->sk_if
[port
] = NULL
;
1556 * Attach the interface. Allocate softc structures, do ifmedia
1557 * setup and ethernet/BPF attach.
1563 struct sk_softc
*sc
;
1564 int error
= 0, *port
;
1566 const char *pname
= NULL
;
1569 sc
= device_get_softc(dev
);
1572 mtx_init(&sc
->sk_mtx
, device_get_nameunit(dev
), MTX_NETWORK_LOCK
,
1574 mtx_init(&sc
->sk_mii_mtx
, "sk_mii_mutex", NULL
, MTX_DEF
);
1576 * Map control/status registers.
1578 pci_enable_busmaster(dev
);
1580 /* Allocate resources */
1581 #ifdef SK_USEIOSPACE
1582 sc
->sk_res_spec
= sk_res_spec_io
;
1584 sc
->sk_res_spec
= sk_res_spec_mem
;
1586 error
= bus_alloc_resources(dev
, sc
->sk_res_spec
, sc
->sk_res
);
1588 if (sc
->sk_res_spec
== sk_res_spec_mem
)
1589 sc
->sk_res_spec
= sk_res_spec_io
;
1591 sc
->sk_res_spec
= sk_res_spec_mem
;
1592 error
= bus_alloc_resources(dev
, sc
->sk_res_spec
, sc
->sk_res
);
1594 device_printf(dev
, "couldn't allocate %s resources\n",
1595 sc
->sk_res_spec
== sk_res_spec_mem
? "memory" :
1601 sc
->sk_type
= sk_win_read_1(sc
, SK_CHIPVER
);
1602 sc
->sk_rev
= (sk_win_read_1(sc
, SK_CONFIG
) >> 4) & 0xf;
1604 /* Bail out if chip is not recognized. */
1605 if (sc
->sk_type
!= SK_GENESIS
&& !SK_YUKON_FAMILY(sc
->sk_type
)) {
1606 device_printf(dev
, "unknown device: chipver=%02x, rev=%x\n",
1607 sc
->sk_type
, sc
->sk_rev
);
1612 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev
),
1613 SYSCTL_CHILDREN(device_get_sysctl_tree(dev
)),
1614 OID_AUTO
, "int_mod", CTLTYPE_INT
|CTLFLAG_RW
,
1615 &sc
->sk_int_mod
, 0, sysctl_hw_sk_int_mod
, "I",
1616 "SK interrupt moderation");
1618 /* Pull in device tunables. */
1619 sc
->sk_int_mod
= SK_IM_DEFAULT
;
1620 error
= resource_int_value(device_get_name(dev
), device_get_unit(dev
),
1621 "int_mod", &sc
->sk_int_mod
);
1623 if (sc
->sk_int_mod
< SK_IM_MIN
||
1624 sc
->sk_int_mod
> SK_IM_MAX
) {
1625 device_printf(dev
, "int_mod value out of range; "
1626 "using default: %d\n", SK_IM_DEFAULT
);
1627 sc
->sk_int_mod
= SK_IM_DEFAULT
;
1631 /* Reset the adapter. */
1634 skrs
= sk_win_read_1(sc
, SK_EPROM0
);
1635 if (sc
->sk_type
== SK_GENESIS
) {
1636 /* Read and save RAM size and RAMbuffer offset */
1638 case SK_RAMSIZE_512K_64
:
1639 sc
->sk_ramsize
= 0x80000;
1640 sc
->sk_rboff
= SK_RBOFF_0
;
1642 case SK_RAMSIZE_1024K_64
:
1643 sc
->sk_ramsize
= 0x100000;
1644 sc
->sk_rboff
= SK_RBOFF_80000
;
1646 case SK_RAMSIZE_1024K_128
:
1647 sc
->sk_ramsize
= 0x100000;
1648 sc
->sk_rboff
= SK_RBOFF_0
;
1650 case SK_RAMSIZE_2048K_128
:
1651 sc
->sk_ramsize
= 0x200000;
1652 sc
->sk_rboff
= SK_RBOFF_0
;
1655 device_printf(dev
, "unknown ram size: %d\n", skrs
);
1659 } else { /* SK_YUKON_FAMILY */
1661 sc
->sk_ramsize
= 0x20000;
1663 sc
->sk_ramsize
= skrs
* (1<<12);
1664 sc
->sk_rboff
= SK_RBOFF_0
;
1667 /* Read and save physical media type */
1668 sc
->sk_pmd
= sk_win_read_1(sc
, SK_PMDTYPE
);
1670 if (sc
->sk_pmd
== 'T' || sc
->sk_pmd
== '1')
1671 sc
->sk_coppertype
= 1;
1673 sc
->sk_coppertype
= 0;
1675 /* Determine whether to name it with VPD PN or just make it up.
1676 * Marvell Yukon VPD PN seems to freqently be bogus. */
1677 switch (pci_get_device(dev
)) {
1678 case DEVICEID_SK_V1
:
1679 case DEVICEID_BELKIN_5005
:
1680 case DEVICEID_3COM_3C940
:
1681 case DEVICEID_LINKSYS_EG1032
:
1682 case DEVICEID_DLINK_DGE530T_A1
:
1683 case DEVICEID_DLINK_DGE530T_B1
:
1684 /* Stay with VPD PN. */
1685 (void) pci_get_vpd_ident(dev
, &pname
);
1687 case DEVICEID_SK_V2
:
1688 /* YUKON VPD PN might bear no resemblance to reality. */
1689 switch (sc
->sk_type
) {
1691 /* Stay with VPD PN. */
1692 (void) pci_get_vpd_ident(dev
, &pname
);
1695 pname
= "Marvell Yukon Gigabit Ethernet";
1698 pname
= "Marvell Yukon Lite Gigabit Ethernet";
1701 pname
= "Marvell Yukon LP Gigabit Ethernet";
1704 pname
= "Marvell Yukon (Unknown) Gigabit Ethernet";
1708 /* Yukon Lite Rev. A0 needs special test. */
1709 if (sc
->sk_type
== SK_YUKON
|| sc
->sk_type
== SK_YUKON_LP
) {
1713 /* Save flash address register before testing. */
1714 far
= sk_win_read_4(sc
, SK_EP_ADDR
);
1716 sk_win_write_1(sc
, SK_EP_ADDR
+0x03, 0xff);
1717 testbyte
= sk_win_read_1(sc
, SK_EP_ADDR
+0x03);
1719 if (testbyte
!= 0x00) {
1720 /* Yukon Lite Rev. A0 detected. */
1721 sc
->sk_type
= SK_YUKON_LITE
;
1722 sc
->sk_rev
= SK_YUKON_LITE_REV_A0
;
1723 /* Restore flash address register. */
1724 sk_win_write_4(sc
, SK_EP_ADDR
, far
);
1729 device_printf(dev
, "unknown device: vendor=%04x, device=%04x, "
1730 "chipver=%02x, rev=%x\n",
1731 pci_get_vendor(dev
), pci_get_device(dev
),
1732 sc
->sk_type
, sc
->sk_rev
);
1737 if (sc
->sk_type
== SK_YUKON_LITE
) {
1738 switch (sc
->sk_rev
) {
1739 case SK_YUKON_LITE_REV_A0
:
1742 case SK_YUKON_LITE_REV_A1
:
1745 case SK_YUKON_LITE_REV_A3
:
1756 /* Announce the product name and more VPD data if there. */
1758 device_printf(dev
, "%s rev. %s(0x%x)\n",
1759 pname
, revstr
, sc
->sk_rev
);
1762 device_printf(dev
, "chip ver = 0x%02x\n", sc
->sk_type
);
1763 device_printf(dev
, "chip rev = 0x%02x\n", sc
->sk_rev
);
1764 device_printf(dev
, "SK_EPROM0 = 0x%02x\n", skrs
);
1765 device_printf(dev
, "SRAM size = 0x%06x\n", sc
->sk_ramsize
);
1768 sc
->sk_devs
[SK_PORT_A
] = device_add_child(dev
, "sk", -1);
1769 if (sc
->sk_devs
[SK_PORT_A
] == NULL
) {
1770 device_printf(dev
, "failed to add child for PORT_A\n");
1774 port
= malloc(sizeof(int), M_DEVBUF
, M_NOWAIT
);
1776 device_printf(dev
, "failed to allocate memory for "
1777 "ivars of PORT_A\n");
1782 device_set_ivars(sc
->sk_devs
[SK_PORT_A
], port
);
1784 if (!(sk_win_read_1(sc
, SK_CONFIG
) & SK_CONFIG_SINGLEMAC
)) {
1785 sc
->sk_devs
[SK_PORT_B
] = device_add_child(dev
, "sk", -1);
1786 if (sc
->sk_devs
[SK_PORT_B
] == NULL
) {
1787 device_printf(dev
, "failed to add child for PORT_B\n");
1791 port
= malloc(sizeof(int), M_DEVBUF
, M_NOWAIT
);
1793 device_printf(dev
, "failed to allocate memory for "
1794 "ivars of PORT_B\n");
1799 device_set_ivars(sc
->sk_devs
[SK_PORT_B
], port
);
1802 /* Turn on the 'driver is loaded' LED. */
1803 CSR_WRITE_2(sc
, SK_LED
, SK_LED_GREEN_ON
);
1805 error
= bus_generic_attach(dev
);
1807 device_printf(dev
, "failed to attach port(s)\n");
1811 /* Hook interrupt last to avoid having to lock softc */
1812 error
= bus_setup_intr(dev
, sc
->sk_res
[1], INTR_TYPE_NET
|INTR_MPSAFE
,
1813 NULL
, sk_intr
, sc
, &sc
->sk_intrhand
);
1816 device_printf(dev
, "couldn't set up irq\n");
1828 * Shutdown hardware and free up resources. This can be called any
1829 * time after the mutex has been initialized. It is called in both
1830 * the error case in attach and the normal detach case so it needs
1831 * to be careful about only freeing resources that have actually been
1838 struct sk_if_softc
*sc_if
;
1841 sc_if
= device_get_softc(dev
);
1842 KASSERT(mtx_initialized(&sc_if
->sk_softc
->sk_mtx
),
1843 ("sk mutex not initialized in sk_detach"));
1846 ifp
= sc_if
->sk_ifp
;
1847 /* These should only be active if attach_xmac succeeded */
1848 if (device_is_attached(dev
)) {
1850 /* Can't hold locks while calling detach */
1851 SK_IF_UNLOCK(sc_if
);
1852 callout_drain(&sc_if
->sk_tick_ch
);
1853 callout_drain(&sc_if
->sk_watchdog_ch
);
1854 ether_ifdetach(ifp
);
1860 * We're generally called from skc_detach() which is using
1861 * device_delete_child() to get to here. It's already trashed
1862 * miibus for us, so don't do it here or we'll panic.
1865 if (sc_if->sk_miibus != NULL)
1866 device_delete_child(dev, sc_if->sk_miibus);
1868 bus_generic_detach(dev
);
1869 sk_dma_jumbo_free(sc_if
);
1871 SK_IF_UNLOCK(sc_if
);
1880 struct sk_softc
*sc
;
1882 sc
= device_get_softc(dev
);
1883 KASSERT(mtx_initialized(&sc
->sk_mtx
), ("sk mutex not initialized"));
1885 if (device_is_alive(dev
)) {
1886 if (sc
->sk_devs
[SK_PORT_A
] != NULL
) {
1887 free(device_get_ivars(sc
->sk_devs
[SK_PORT_A
]), M_DEVBUF
);
1888 device_delete_child(dev
, sc
->sk_devs
[SK_PORT_A
]);
1890 if (sc
->sk_devs
[SK_PORT_B
] != NULL
) {
1891 free(device_get_ivars(sc
->sk_devs
[SK_PORT_B
]), M_DEVBUF
);
1892 device_delete_child(dev
, sc
->sk_devs
[SK_PORT_B
]);
1894 bus_generic_detach(dev
);
1897 if (sc
->sk_intrhand
)
1898 bus_teardown_intr(dev
, sc
->sk_res
[1], sc
->sk_intrhand
);
1899 bus_release_resources(dev
, sc
->sk_res_spec
, sc
->sk_res
);
1901 mtx_destroy(&sc
->sk_mii_mtx
);
1902 mtx_destroy(&sc
->sk_mtx
);
1907 static bus_dma_tag_t
1908 skc_get_dma_tag(device_t bus
, device_t child __unused
)
1911 return (bus_get_dma_tag(bus
));
1914 struct sk_dmamap_arg
{
1915 bus_addr_t sk_busaddr
;
1919 sk_dmamap_cb(arg
, segs
, nseg
, error
)
1921 bus_dma_segment_t
*segs
;
1925 struct sk_dmamap_arg
*ctx
;
1931 ctx
->sk_busaddr
= segs
[0].ds_addr
;
1935 * Allocate jumbo buffer storage. The SysKonnect adapters support
1936 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1937 * use them in their drivers. In order for us to use them, we need
1938 * large 9K receive buffers, however standard mbuf clusters are only
1939 * 2048 bytes in size. Consequently, we need to allocate and manage
1940 * our own jumbo buffer pool. Fortunately, this does not require an
1941 * excessive amount of additional code.
1945 struct sk_if_softc
*sc_if
;
1947 struct sk_dmamap_arg ctx
;
1948 struct sk_txdesc
*txd
;
1949 struct sk_rxdesc
*rxd
;
1952 /* create parent tag */
1955 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
1956 * in bus_dma_tag_create(9) as the NIC would support DAC mode.
1957 * However bz@ reported that it does not work on amd64 with > 4GB
1958 * RAM. Until we have more clues of the breakage, disable DAC mode
1959 * by limiting DMA address to be in 32bit address space.
1961 error
= bus_dma_tag_create(
1962 bus_get_dma_tag(sc_if
->sk_if_dev
),/* parent */
1963 1, 0, /* algnmnt, boundary */
1964 BUS_SPACE_MAXADDR_32BIT
, /* lowaddr */
1965 BUS_SPACE_MAXADDR
, /* highaddr */
1966 NULL
, NULL
, /* filter, filterarg */
1967 BUS_SPACE_MAXSIZE_32BIT
, /* maxsize */
1969 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
1971 NULL
, NULL
, /* lockfunc, lockarg */
1972 &sc_if
->sk_cdata
.sk_parent_tag
);
1974 device_printf(sc_if
->sk_if_dev
,
1975 "failed to create parent DMA tag\n");
1979 /* create tag for Tx ring */
1980 error
= bus_dma_tag_create(sc_if
->sk_cdata
.sk_parent_tag
,/* parent */
1981 SK_RING_ALIGN
, 0, /* algnmnt, boundary */
1982 BUS_SPACE_MAXADDR_32BIT
, /* lowaddr */
1983 BUS_SPACE_MAXADDR
, /* highaddr */
1984 NULL
, NULL
, /* filter, filterarg */
1985 SK_TX_RING_SZ
, /* maxsize */
1987 SK_TX_RING_SZ
, /* maxsegsize */
1989 NULL
, NULL
, /* lockfunc, lockarg */
1990 &sc_if
->sk_cdata
.sk_tx_ring_tag
);
1992 device_printf(sc_if
->sk_if_dev
,
1993 "failed to allocate Tx ring DMA tag\n");
1997 /* create tag for Rx ring */
1998 error
= bus_dma_tag_create(sc_if
->sk_cdata
.sk_parent_tag
,/* parent */
1999 SK_RING_ALIGN
, 0, /* algnmnt, boundary */
2000 BUS_SPACE_MAXADDR_32BIT
, /* lowaddr */
2001 BUS_SPACE_MAXADDR
, /* highaddr */
2002 NULL
, NULL
, /* filter, filterarg */
2003 SK_RX_RING_SZ
, /* maxsize */
2005 SK_RX_RING_SZ
, /* maxsegsize */
2007 NULL
, NULL
, /* lockfunc, lockarg */
2008 &sc_if
->sk_cdata
.sk_rx_ring_tag
);
2010 device_printf(sc_if
->sk_if_dev
,
2011 "failed to allocate Rx ring DMA tag\n");
2015 /* create tag for Tx buffers */
2016 error
= bus_dma_tag_create(sc_if
->sk_cdata
.sk_parent_tag
,/* parent */
2017 1, 0, /* algnmnt, boundary */
2018 BUS_SPACE_MAXADDR
, /* lowaddr */
2019 BUS_SPACE_MAXADDR
, /* highaddr */
2020 NULL
, NULL
, /* filter, filterarg */
2021 MCLBYTES
* SK_MAXTXSEGS
, /* maxsize */
2022 SK_MAXTXSEGS
, /* nsegments */
2023 MCLBYTES
, /* maxsegsize */
2025 NULL
, NULL
, /* lockfunc, lockarg */
2026 &sc_if
->sk_cdata
.sk_tx_tag
);
2028 device_printf(sc_if
->sk_if_dev
,
2029 "failed to allocate Tx DMA tag\n");
2033 /* create tag for Rx buffers */
2034 error
= bus_dma_tag_create(sc_if
->sk_cdata
.sk_parent_tag
,/* parent */
2035 1, 0, /* algnmnt, boundary */
2036 BUS_SPACE_MAXADDR
, /* lowaddr */
2037 BUS_SPACE_MAXADDR
, /* highaddr */
2038 NULL
, NULL
, /* filter, filterarg */
2039 MCLBYTES
, /* maxsize */
2041 MCLBYTES
, /* maxsegsize */
2043 NULL
, NULL
, /* lockfunc, lockarg */
2044 &sc_if
->sk_cdata
.sk_rx_tag
);
2046 device_printf(sc_if
->sk_if_dev
,
2047 "failed to allocate Rx DMA tag\n");
2051 /* allocate DMA'able memory and load the DMA map for Tx ring */
2052 error
= bus_dmamem_alloc(sc_if
->sk_cdata
.sk_tx_ring_tag
,
2053 (void **)&sc_if
->sk_rdata
.sk_tx_ring
, BUS_DMA_NOWAIT
|
2054 BUS_DMA_COHERENT
| BUS_DMA_ZERO
, &sc_if
->sk_cdata
.sk_tx_ring_map
);
2056 device_printf(sc_if
->sk_if_dev
,
2057 "failed to allocate DMA'able memory for Tx ring\n");
2062 error
= bus_dmamap_load(sc_if
->sk_cdata
.sk_tx_ring_tag
,
2063 sc_if
->sk_cdata
.sk_tx_ring_map
, sc_if
->sk_rdata
.sk_tx_ring
,
2064 SK_TX_RING_SZ
, sk_dmamap_cb
, &ctx
, BUS_DMA_NOWAIT
);
2066 device_printf(sc_if
->sk_if_dev
,
2067 "failed to load DMA'able memory for Tx ring\n");
2070 sc_if
->sk_rdata
.sk_tx_ring_paddr
= ctx
.sk_busaddr
;
2072 /* allocate DMA'able memory and load the DMA map for Rx ring */
2073 error
= bus_dmamem_alloc(sc_if
->sk_cdata
.sk_rx_ring_tag
,
2074 (void **)&sc_if
->sk_rdata
.sk_rx_ring
, BUS_DMA_NOWAIT
|
2075 BUS_DMA_COHERENT
| BUS_DMA_ZERO
, &sc_if
->sk_cdata
.sk_rx_ring_map
);
2077 device_printf(sc_if
->sk_if_dev
,
2078 "failed to allocate DMA'able memory for Rx ring\n");
2083 error
= bus_dmamap_load(sc_if
->sk_cdata
.sk_rx_ring_tag
,
2084 sc_if
->sk_cdata
.sk_rx_ring_map
, sc_if
->sk_rdata
.sk_rx_ring
,
2085 SK_RX_RING_SZ
, sk_dmamap_cb
, &ctx
, BUS_DMA_NOWAIT
);
2087 device_printf(sc_if
->sk_if_dev
,
2088 "failed to load DMA'able memory for Rx ring\n");
2091 sc_if
->sk_rdata
.sk_rx_ring_paddr
= ctx
.sk_busaddr
;
2093 /* create DMA maps for Tx buffers */
2094 for (i
= 0; i
< SK_TX_RING_CNT
; i
++) {
2095 txd
= &sc_if
->sk_cdata
.sk_txdesc
[i
];
2097 txd
->tx_dmamap
= NULL
;
2098 error
= bus_dmamap_create(sc_if
->sk_cdata
.sk_tx_tag
, 0,
2101 device_printf(sc_if
->sk_if_dev
,
2102 "failed to create Tx dmamap\n");
2107 /* create DMA maps for Rx buffers */
2108 if ((error
= bus_dmamap_create(sc_if
->sk_cdata
.sk_rx_tag
, 0,
2109 &sc_if
->sk_cdata
.sk_rx_sparemap
)) != 0) {
2110 device_printf(sc_if
->sk_if_dev
,
2111 "failed to create spare Rx dmamap\n");
2114 for (i
= 0; i
< SK_RX_RING_CNT
; i
++) {
2115 rxd
= &sc_if
->sk_cdata
.sk_rxdesc
[i
];
2117 rxd
->rx_dmamap
= NULL
;
2118 error
= bus_dmamap_create(sc_if
->sk_cdata
.sk_rx_tag
, 0,
2121 device_printf(sc_if
->sk_if_dev
,
2122 "failed to create Rx dmamap\n");
2132 sk_dma_jumbo_alloc(sc_if
)
2133 struct sk_if_softc
*sc_if
;
2135 struct sk_dmamap_arg ctx
;
2136 struct sk_rxdesc
*jrxd
;
2139 if (jumbo_disable
!= 0) {
2140 device_printf(sc_if
->sk_if_dev
, "disabling jumbo frame support\n");
2141 sc_if
->sk_jumbo_disable
= 1;
2144 /* create tag for jumbo Rx ring */
2145 error
= bus_dma_tag_create(sc_if
->sk_cdata
.sk_parent_tag
,/* parent */
2146 SK_RING_ALIGN
, 0, /* algnmnt, boundary */
2147 BUS_SPACE_MAXADDR_32BIT
, /* lowaddr */
2148 BUS_SPACE_MAXADDR
, /* highaddr */
2149 NULL
, NULL
, /* filter, filterarg */
2150 SK_JUMBO_RX_RING_SZ
, /* maxsize */
2152 SK_JUMBO_RX_RING_SZ
, /* maxsegsize */
2154 NULL
, NULL
, /* lockfunc, lockarg */
2155 &sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
);
2157 device_printf(sc_if
->sk_if_dev
,
2158 "failed to allocate jumbo Rx ring DMA tag\n");
2162 /* create tag for jumbo Rx buffers */
2163 error
= bus_dma_tag_create(sc_if
->sk_cdata
.sk_parent_tag
,/* parent */
2164 1, 0, /* algnmnt, boundary */
2165 BUS_SPACE_MAXADDR
, /* lowaddr */
2166 BUS_SPACE_MAXADDR
, /* highaddr */
2167 NULL
, NULL
, /* filter, filterarg */
2168 MJUM9BYTES
, /* maxsize */
2170 MJUM9BYTES
, /* maxsegsize */
2172 NULL
, NULL
, /* lockfunc, lockarg */
2173 &sc_if
->sk_cdata
.sk_jumbo_rx_tag
);
2175 device_printf(sc_if
->sk_if_dev
,
2176 "failed to allocate jumbo Rx DMA tag\n");
2180 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
2181 error
= bus_dmamem_alloc(sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
,
2182 (void **)&sc_if
->sk_rdata
.sk_jumbo_rx_ring
, BUS_DMA_NOWAIT
|
2183 BUS_DMA_COHERENT
| BUS_DMA_ZERO
,
2184 &sc_if
->sk_cdata
.sk_jumbo_rx_ring_map
);
2186 device_printf(sc_if
->sk_if_dev
,
2187 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2192 error
= bus_dmamap_load(sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
,
2193 sc_if
->sk_cdata
.sk_jumbo_rx_ring_map
,
2194 sc_if
->sk_rdata
.sk_jumbo_rx_ring
, SK_JUMBO_RX_RING_SZ
, sk_dmamap_cb
,
2195 &ctx
, BUS_DMA_NOWAIT
);
2197 device_printf(sc_if
->sk_if_dev
,
2198 "failed to load DMA'able memory for jumbo Rx ring\n");
2201 sc_if
->sk_rdata
.sk_jumbo_rx_ring_paddr
= ctx
.sk_busaddr
;
2203 /* create DMA maps for jumbo Rx buffers */
2204 if ((error
= bus_dmamap_create(sc_if
->sk_cdata
.sk_jumbo_rx_tag
, 0,
2205 &sc_if
->sk_cdata
.sk_jumbo_rx_sparemap
)) != 0) {
2206 device_printf(sc_if
->sk_if_dev
,
2207 "failed to create spare jumbo Rx dmamap\n");
2210 for (i
= 0; i
< SK_JUMBO_RX_RING_CNT
; i
++) {
2211 jrxd
= &sc_if
->sk_cdata
.sk_jumbo_rxdesc
[i
];
2213 jrxd
->rx_dmamap
= NULL
;
2214 error
= bus_dmamap_create(sc_if
->sk_cdata
.sk_jumbo_rx_tag
, 0,
2217 device_printf(sc_if
->sk_if_dev
,
2218 "failed to create jumbo Rx dmamap\n");
2226 sk_dma_jumbo_free(sc_if
);
2227 device_printf(sc_if
->sk_if_dev
, "disabling jumbo frame support due to "
2228 "resource shortage\n");
2229 sc_if
->sk_jumbo_disable
= 1;
2235 struct sk_if_softc
*sc_if
;
2237 struct sk_txdesc
*txd
;
2238 struct sk_rxdesc
*rxd
;
2242 if (sc_if
->sk_cdata
.sk_tx_ring_tag
) {
2243 if (sc_if
->sk_cdata
.sk_tx_ring_map
)
2244 bus_dmamap_unload(sc_if
->sk_cdata
.sk_tx_ring_tag
,
2245 sc_if
->sk_cdata
.sk_tx_ring_map
);
2246 if (sc_if
->sk_cdata
.sk_tx_ring_map
&&
2247 sc_if
->sk_rdata
.sk_tx_ring
)
2248 bus_dmamem_free(sc_if
->sk_cdata
.sk_tx_ring_tag
,
2249 sc_if
->sk_rdata
.sk_tx_ring
,
2250 sc_if
->sk_cdata
.sk_tx_ring_map
);
2251 sc_if
->sk_rdata
.sk_tx_ring
= NULL
;
2252 sc_if
->sk_cdata
.sk_tx_ring_map
= NULL
;
2253 bus_dma_tag_destroy(sc_if
->sk_cdata
.sk_tx_ring_tag
);
2254 sc_if
->sk_cdata
.sk_tx_ring_tag
= NULL
;
2257 if (sc_if
->sk_cdata
.sk_rx_ring_tag
) {
2258 if (sc_if
->sk_cdata
.sk_rx_ring_map
)
2259 bus_dmamap_unload(sc_if
->sk_cdata
.sk_rx_ring_tag
,
2260 sc_if
->sk_cdata
.sk_rx_ring_map
);
2261 if (sc_if
->sk_cdata
.sk_rx_ring_map
&&
2262 sc_if
->sk_rdata
.sk_rx_ring
)
2263 bus_dmamem_free(sc_if
->sk_cdata
.sk_rx_ring_tag
,
2264 sc_if
->sk_rdata
.sk_rx_ring
,
2265 sc_if
->sk_cdata
.sk_rx_ring_map
);
2266 sc_if
->sk_rdata
.sk_rx_ring
= NULL
;
2267 sc_if
->sk_cdata
.sk_rx_ring_map
= NULL
;
2268 bus_dma_tag_destroy(sc_if
->sk_cdata
.sk_rx_ring_tag
);
2269 sc_if
->sk_cdata
.sk_rx_ring_tag
= NULL
;
2272 if (sc_if
->sk_cdata
.sk_tx_tag
) {
2273 for (i
= 0; i
< SK_TX_RING_CNT
; i
++) {
2274 txd
= &sc_if
->sk_cdata
.sk_txdesc
[i
];
2275 if (txd
->tx_dmamap
) {
2276 bus_dmamap_destroy(sc_if
->sk_cdata
.sk_tx_tag
,
2278 txd
->tx_dmamap
= NULL
;
2281 bus_dma_tag_destroy(sc_if
->sk_cdata
.sk_tx_tag
);
2282 sc_if
->sk_cdata
.sk_tx_tag
= NULL
;
2285 if (sc_if
->sk_cdata
.sk_rx_tag
) {
2286 for (i
= 0; i
< SK_RX_RING_CNT
; i
++) {
2287 rxd
= &sc_if
->sk_cdata
.sk_rxdesc
[i
];
2288 if (rxd
->rx_dmamap
) {
2289 bus_dmamap_destroy(sc_if
->sk_cdata
.sk_rx_tag
,
2291 rxd
->rx_dmamap
= NULL
;
2294 if (sc_if
->sk_cdata
.sk_rx_sparemap
) {
2295 bus_dmamap_destroy(sc_if
->sk_cdata
.sk_rx_tag
,
2296 sc_if
->sk_cdata
.sk_rx_sparemap
);
2297 sc_if
->sk_cdata
.sk_rx_sparemap
= NULL
;
2299 bus_dma_tag_destroy(sc_if
->sk_cdata
.sk_rx_tag
);
2300 sc_if
->sk_cdata
.sk_rx_tag
= NULL
;
2303 if (sc_if
->sk_cdata
.sk_parent_tag
) {
2304 bus_dma_tag_destroy(sc_if
->sk_cdata
.sk_parent_tag
);
2305 sc_if
->sk_cdata
.sk_parent_tag
= NULL
;
2310 sk_dma_jumbo_free(sc_if
)
2311 struct sk_if_softc
*sc_if
;
2313 struct sk_rxdesc
*jrxd
;
2317 if (sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
) {
2318 if (sc_if
->sk_cdata
.sk_jumbo_rx_ring_map
)
2319 bus_dmamap_unload(sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
,
2320 sc_if
->sk_cdata
.sk_jumbo_rx_ring_map
);
2321 if (sc_if
->sk_cdata
.sk_jumbo_rx_ring_map
&&
2322 sc_if
->sk_rdata
.sk_jumbo_rx_ring
)
2323 bus_dmamem_free(sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
,
2324 sc_if
->sk_rdata
.sk_jumbo_rx_ring
,
2325 sc_if
->sk_cdata
.sk_jumbo_rx_ring_map
);
2326 sc_if
->sk_rdata
.sk_jumbo_rx_ring
= NULL
;
2327 sc_if
->sk_cdata
.sk_jumbo_rx_ring_map
= NULL
;
2328 bus_dma_tag_destroy(sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
);
2329 sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
= NULL
;
2332 /* jumbo Rx buffers */
2333 if (sc_if
->sk_cdata
.sk_jumbo_rx_tag
) {
2334 for (i
= 0; i
< SK_JUMBO_RX_RING_CNT
; i
++) {
2335 jrxd
= &sc_if
->sk_cdata
.sk_jumbo_rxdesc
[i
];
2336 if (jrxd
->rx_dmamap
) {
2338 sc_if
->sk_cdata
.sk_jumbo_rx_tag
,
2340 jrxd
->rx_dmamap
= NULL
;
2343 if (sc_if
->sk_cdata
.sk_jumbo_rx_sparemap
) {
2344 bus_dmamap_destroy(sc_if
->sk_cdata
.sk_jumbo_rx_tag
,
2345 sc_if
->sk_cdata
.sk_jumbo_rx_sparemap
);
2346 sc_if
->sk_cdata
.sk_jumbo_rx_sparemap
= NULL
;
2348 bus_dma_tag_destroy(sc_if
->sk_cdata
.sk_jumbo_rx_tag
);
2349 sc_if
->sk_cdata
.sk_jumbo_rx_tag
= NULL
;
2354 sk_txcksum(ifp
, m
, f
)
2357 struct sk_tx_desc
*f
;
2363 offset
= sizeof(struct ip
) + ETHER_HDR_LEN
;
2364 for(; m
&& m
->m_len
== 0; m
= m
->m_next
)
2366 if (m
== NULL
|| m
->m_len
< ETHER_HDR_LEN
) {
2367 if_printf(ifp
, "%s: m_len < ETHER_HDR_LEN\n", __func__
);
2368 /* checksum may be corrupted */
2371 if (m
->m_len
< ETHER_HDR_LEN
+ sizeof(u_int32_t
)) {
2372 if (m
->m_len
!= ETHER_HDR_LEN
) {
2373 if_printf(ifp
, "%s: m_len != ETHER_HDR_LEN\n",
2375 /* checksum may be corrupted */
2378 for(m
= m
->m_next
; m
&& m
->m_len
== 0; m
= m
->m_next
)
2381 offset
= sizeof(struct ip
) + ETHER_HDR_LEN
;
2382 /* checksum may be corrupted */
2385 ip
= mtod(m
, struct ip
*);
2387 p
= mtod(m
, u_int8_t
*);
2389 ip
= (struct ip
*)p
;
2391 offset
= (ip
->ip_hl
<< 2) + ETHER_HDR_LEN
;
2394 f
->sk_csum_startval
= 0;
2395 f
->sk_csum_start
= htole32(((offset
+ m
->m_pkthdr
.csum_data
) & 0xffff) |
2400 sk_encap(sc_if
, m_head
)
2401 struct sk_if_softc
*sc_if
;
2402 struct mbuf
**m_head
;
2404 struct sk_txdesc
*txd
;
2405 struct sk_tx_desc
*f
= NULL
;
2407 bus_dma_segment_t txsegs
[SK_MAXTXSEGS
];
2408 u_int32_t cflags
, frag
, si
, sk_ctl
;
2411 SK_IF_LOCK_ASSERT(sc_if
);
2413 if ((txd
= STAILQ_FIRST(&sc_if
->sk_cdata
.sk_txfreeq
)) == NULL
)
2416 error
= bus_dmamap_load_mbuf_sg(sc_if
->sk_cdata
.sk_tx_tag
,
2417 txd
->tx_dmamap
, *m_head
, txsegs
, &nseg
, 0);
2418 if (error
== EFBIG
) {
2419 m
= m_defrag(*m_head
, M_NOWAIT
);
2426 error
= bus_dmamap_load_mbuf_sg(sc_if
->sk_cdata
.sk_tx_tag
,
2427 txd
->tx_dmamap
, *m_head
, txsegs
, &nseg
, 0);
2433 } else if (error
!= 0)
2440 if (sc_if
->sk_cdata
.sk_tx_cnt
+ nseg
>= SK_TX_RING_CNT
) {
2441 bus_dmamap_unload(sc_if
->sk_cdata
.sk_tx_tag
, txd
->tx_dmamap
);
2446 if ((m
->m_pkthdr
.csum_flags
& sc_if
->sk_ifp
->if_hwassist
) != 0)
2447 cflags
= SK_OPCODE_CSUM
;
2449 cflags
= SK_OPCODE_DEFAULT
;
2450 si
= frag
= sc_if
->sk_cdata
.sk_tx_prod
;
2451 for (i
= 0; i
< nseg
; i
++) {
2452 f
= &sc_if
->sk_rdata
.sk_tx_ring
[frag
];
2453 f
->sk_data_lo
= htole32(SK_ADDR_LO(txsegs
[i
].ds_addr
));
2454 f
->sk_data_hi
= htole32(SK_ADDR_HI(txsegs
[i
].ds_addr
));
2455 sk_ctl
= txsegs
[i
].ds_len
| cflags
;
2457 if (cflags
== SK_OPCODE_CSUM
)
2458 sk_txcksum(sc_if
->sk_ifp
, m
, f
);
2459 sk_ctl
|= SK_TXCTL_FIRSTFRAG
;
2461 sk_ctl
|= SK_TXCTL_OWN
;
2462 f
->sk_ctl
= htole32(sk_ctl
);
2463 sc_if
->sk_cdata
.sk_tx_cnt
++;
2464 SK_INC(frag
, SK_TX_RING_CNT
);
2466 sc_if
->sk_cdata
.sk_tx_prod
= frag
;
2468 /* set EOF on the last desciptor */
2469 frag
= (frag
+ SK_TX_RING_CNT
- 1) % SK_TX_RING_CNT
;
2470 f
= &sc_if
->sk_rdata
.sk_tx_ring
[frag
];
2471 f
->sk_ctl
|= htole32(SK_TXCTL_LASTFRAG
| SK_TXCTL_EOF_INTR
);
2473 /* turn the first descriptor ownership to NIC */
2474 f
= &sc_if
->sk_rdata
.sk_tx_ring
[si
];
2475 f
->sk_ctl
|= htole32(SK_TXCTL_OWN
);
2477 STAILQ_REMOVE_HEAD(&sc_if
->sk_cdata
.sk_txfreeq
, tx_q
);
2478 STAILQ_INSERT_TAIL(&sc_if
->sk_cdata
.sk_txbusyq
, txd
, tx_q
);
2481 /* sync descriptors */
2482 bus_dmamap_sync(sc_if
->sk_cdata
.sk_tx_tag
, txd
->tx_dmamap
,
2483 BUS_DMASYNC_PREWRITE
);
2484 bus_dmamap_sync(sc_if
->sk_cdata
.sk_tx_ring_tag
,
2485 sc_if
->sk_cdata
.sk_tx_ring_map
,
2486 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
2495 struct sk_if_softc
*sc_if
;
2497 sc_if
= ifp
->if_softc
;
2500 sk_start_locked(ifp
);
2501 SK_IF_UNLOCK(sc_if
);
2507 sk_start_locked(ifp
)
2510 struct sk_softc
*sc
;
2511 struct sk_if_softc
*sc_if
;
2512 struct mbuf
*m_head
;
2515 sc_if
= ifp
->if_softc
;
2516 sc
= sc_if
->sk_softc
;
2518 SK_IF_LOCK_ASSERT(sc_if
);
2520 for (enq
= 0; !IFQ_DRV_IS_EMPTY(&ifp
->if_snd
) &&
2521 sc_if
->sk_cdata
.sk_tx_cnt
< SK_TX_RING_CNT
- 1; ) {
2522 IFQ_DRV_DEQUEUE(&ifp
->if_snd
, m_head
);
2527 * Pack the data into the transmit ring. If we
2528 * don't have room, set the OACTIVE flag and wait
2529 * for the NIC to drain the ring.
2531 if (sk_encap(sc_if
, &m_head
)) {
2534 IFQ_DRV_PREPEND(&ifp
->if_snd
, m_head
);
2535 ifp
->if_drv_flags
|= IFF_DRV_OACTIVE
;
2541 * If there's a BPF listener, bounce a copy of this frame
2544 BPF_MTAP(ifp
, m_head
);
2549 CSR_WRITE_4(sc
, sc_if
->sk_tx_bmu
, SK_TXBMU_TX_START
);
2551 /* Set a timeout in case the chip goes out to lunch. */
2552 sc_if
->sk_watchdog_timer
= 5;
2561 struct sk_if_softc
*sc_if
;
2565 sc_if
= ifp
->if_softc
;
2567 SK_IF_LOCK_ASSERT(sc_if
);
2569 if (sc_if
->sk_watchdog_timer
== 0 || --sc_if
->sk_watchdog_timer
)
2573 * Reclaim first as there is a possibility of losing Tx completion
2577 if (sc_if
->sk_cdata
.sk_tx_cnt
!= 0) {
2578 if_printf(sc_if
->sk_ifp
, "watchdog timeout\n");
2580 ifp
->if_drv_flags
&= ~IFF_DRV_RUNNING
;
2581 sk_init_locked(sc_if
);
2585 callout_reset(&sc_if
->sk_watchdog_ch
, hz
, sk_watchdog
, ifp
);
2594 struct sk_softc
*sc
;
2596 sc
= device_get_softc(dev
);
2599 /* Turn off the 'driver is loaded' LED. */
2600 CSR_WRITE_2(sc
, SK_LED
, SK_LED_GREEN_OFF
);
2603 * Reset the GEnesis controller. Doing this should also
2604 * assert the resets on the attached XMAC(s).
2616 struct sk_softc
*sc
;
2617 struct sk_if_softc
*sc_if0
, *sc_if1
;
2618 struct ifnet
*ifp0
= NULL
, *ifp1
= NULL
;
2620 sc
= device_get_softc(dev
);
2624 sc_if0
= sc
->sk_if
[SK_PORT_A
];
2625 sc_if1
= sc
->sk_if
[SK_PORT_B
];
2627 ifp0
= sc_if0
->sk_ifp
;
2629 ifp1
= sc_if1
->sk_ifp
;
2634 sc
->sk_suspended
= 1;
2645 struct sk_softc
*sc
;
2646 struct sk_if_softc
*sc_if0
, *sc_if1
;
2647 struct ifnet
*ifp0
= NULL
, *ifp1
= NULL
;
2649 sc
= device_get_softc(dev
);
2653 sc_if0
= sc
->sk_if
[SK_PORT_A
];
2654 sc_if1
= sc
->sk_if
[SK_PORT_B
];
2656 ifp0
= sc_if0
->sk_ifp
;
2658 ifp1
= sc_if1
->sk_ifp
;
2659 if (ifp0
!= NULL
&& ifp0
->if_flags
& IFF_UP
)
2660 sk_init_locked(sc_if0
);
2661 if (ifp1
!= NULL
&& ifp1
->if_flags
& IFF_UP
)
2662 sk_init_locked(sc_if1
);
2663 sc
->sk_suspended
= 0;
2671 * According to the data sheet from SK-NET GENESIS the hardware can compute
2672 * two Rx checksums at the same time(Each checksum start position is
2673 * programmed in Rx descriptors). However it seems that TCP/UDP checksum
2674 * does not work at least on my Yukon hardware. I tried every possible ways
2675 * to get correct checksum value but couldn't get correct one. So TCP/UDP
2676 * checksum offload was disabled at the moment and only IP checksum offload
2678 * As normal IP header size is 20 bytes I can't expect it would give an
2679 * increase in throughput. However it seems it doesn't hurt performance in
2680 * my testing. If there is a more detailed information for checksum secret
2681 * of the hardware in question please contact yongari@FreeBSD.org to add
2682 * TCP/UDP checksum offload support.
2684 static __inline
void
2685 sk_rxcksum(ifp
, m
, csum
)
2690 struct ether_header
*eh
;
2692 int32_t hlen
, len
, pktlen
;
2693 u_int16_t csum1
, csum2
, ipcsum
;
2695 pktlen
= m
->m_pkthdr
.len
;
2696 if (pktlen
< sizeof(struct ether_header
) + sizeof(struct ip
))
2698 eh
= mtod(m
, struct ether_header
*);
2699 if (eh
->ether_type
!= htons(ETHERTYPE_IP
))
2701 ip
= (struct ip
*)(eh
+ 1);
2702 if (ip
->ip_v
!= IPVERSION
)
2704 hlen
= ip
->ip_hl
<< 2;
2705 pktlen
-= sizeof(struct ether_header
);
2706 if (hlen
< sizeof(struct ip
))
2708 if (ntohs(ip
->ip_len
) < hlen
)
2710 if (ntohs(ip
->ip_len
) != pktlen
)
2713 csum1
= htons(csum
& 0xffff);
2714 csum2
= htons((csum
>> 16) & 0xffff);
2715 ipcsum
= in_addword(csum1
, ~csum2
& 0xffff);
2716 /* checksum fixup for IP options */
2717 len
= hlen
- sizeof(struct ip
);
2720 * If the second checksum value is correct we can compute IP
2721 * checksum with simple math. Unfortunately the second checksum
2722 * value is wrong so we can't verify the checksum from the
2723 * value(It seems there is some magic here to get correct
2724 * value). If the second checksum value is correct it also
2725 * means we can get TCP/UDP checksum here. However, it still
2726 * needs pseudo header checksum calculation due to hardware
2731 m
->m_pkthdr
.csum_flags
= CSUM_IP_CHECKED
;
2732 if (ipcsum
== 0xffff)
2733 m
->m_pkthdr
.csum_flags
|= CSUM_IP_VALID
;
2737 sk_rxvalid(sc
, stat
, len
)
2738 struct sk_softc
*sc
;
2739 u_int32_t stat
, len
;
2742 if (sc
->sk_type
== SK_GENESIS
) {
2743 if ((stat
& XM_RXSTAT_ERRFRAME
) == XM_RXSTAT_ERRFRAME
||
2744 XM_RXSTAT_BYTES(stat
) != len
)
2747 if ((stat
& (YU_RXSTAT_CRCERR
| YU_RXSTAT_LONGERR
|
2748 YU_RXSTAT_MIIERR
| YU_RXSTAT_BADFC
| YU_RXSTAT_GOODFC
|
2749 YU_RXSTAT_JABBER
)) != 0 ||
2750 (stat
& YU_RXSTAT_RXOK
) != YU_RXSTAT_RXOK
||
2751 YU_RXSTAT_BYTES(stat
) != len
)
2760 struct sk_if_softc
*sc_if
;
2762 struct sk_softc
*sc
;
2765 struct sk_rx_desc
*cur_rx
;
2766 struct sk_rxdesc
*rxd
;
2768 u_int32_t csum
, rxstat
, sk_ctl
;
2770 sc
= sc_if
->sk_softc
;
2771 ifp
= sc_if
->sk_ifp
;
2773 SK_IF_LOCK_ASSERT(sc_if
);
2775 bus_dmamap_sync(sc_if
->sk_cdata
.sk_rx_ring_tag
,
2776 sc_if
->sk_cdata
.sk_rx_ring_map
, BUS_DMASYNC_POSTREAD
);
2779 for (cons
= sc_if
->sk_cdata
.sk_rx_cons
; prog
< SK_RX_RING_CNT
;
2780 prog
++, SK_INC(cons
, SK_RX_RING_CNT
)) {
2781 cur_rx
= &sc_if
->sk_rdata
.sk_rx_ring
[cons
];
2782 sk_ctl
= le32toh(cur_rx
->sk_ctl
);
2783 if ((sk_ctl
& SK_RXCTL_OWN
) != 0)
2785 rxd
= &sc_if
->sk_cdata
.sk_rxdesc
[cons
];
2786 rxstat
= le32toh(cur_rx
->sk_xmac_rxstat
);
2788 if ((sk_ctl
& (SK_RXCTL_STATUS_VALID
| SK_RXCTL_FIRSTFRAG
|
2789 SK_RXCTL_LASTFRAG
)) != (SK_RXCTL_STATUS_VALID
|
2790 SK_RXCTL_FIRSTFRAG
| SK_RXCTL_LASTFRAG
) ||
2791 SK_RXBYTES(sk_ctl
) < SK_MIN_FRAMELEN
||
2792 SK_RXBYTES(sk_ctl
) > SK_MAX_FRAMELEN
||
2793 sk_rxvalid(sc
, rxstat
, SK_RXBYTES(sk_ctl
)) == 0) {
2795 sk_discard_rxbuf(sc_if
, cons
);
2800 csum
= le32toh(cur_rx
->sk_csum
);
2801 if (sk_newbuf(sc_if
, cons
) != 0) {
2803 /* reuse old buffer */
2804 sk_discard_rxbuf(sc_if
, cons
);
2807 m
->m_pkthdr
.rcvif
= ifp
;
2808 m
->m_pkthdr
.len
= m
->m_len
= SK_RXBYTES(sk_ctl
);
2810 if ((ifp
->if_capenable
& IFCAP_RXCSUM
) != 0)
2811 sk_rxcksum(ifp
, m
, csum
);
2812 SK_IF_UNLOCK(sc_if
);
2813 (*ifp
->if_input
)(ifp
, m
);
2818 sc_if
->sk_cdata
.sk_rx_cons
= cons
;
2819 bus_dmamap_sync(sc_if
->sk_cdata
.sk_rx_ring_tag
,
2820 sc_if
->sk_cdata
.sk_rx_ring_map
,
2821 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
2826 sk_jumbo_rxeof(sc_if
)
2827 struct sk_if_softc
*sc_if
;
2829 struct sk_softc
*sc
;
2832 struct sk_rx_desc
*cur_rx
;
2833 struct sk_rxdesc
*jrxd
;
2835 u_int32_t csum
, rxstat
, sk_ctl
;
2837 sc
= sc_if
->sk_softc
;
2838 ifp
= sc_if
->sk_ifp
;
2840 SK_IF_LOCK_ASSERT(sc_if
);
2842 bus_dmamap_sync(sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
,
2843 sc_if
->sk_cdata
.sk_jumbo_rx_ring_map
, BUS_DMASYNC_POSTREAD
);
2846 for (cons
= sc_if
->sk_cdata
.sk_jumbo_rx_cons
;
2847 prog
< SK_JUMBO_RX_RING_CNT
;
2848 prog
++, SK_INC(cons
, SK_JUMBO_RX_RING_CNT
)) {
2849 cur_rx
= &sc_if
->sk_rdata
.sk_jumbo_rx_ring
[cons
];
2850 sk_ctl
= le32toh(cur_rx
->sk_ctl
);
2851 if ((sk_ctl
& SK_RXCTL_OWN
) != 0)
2853 jrxd
= &sc_if
->sk_cdata
.sk_jumbo_rxdesc
[cons
];
2854 rxstat
= le32toh(cur_rx
->sk_xmac_rxstat
);
2856 if ((sk_ctl
& (SK_RXCTL_STATUS_VALID
| SK_RXCTL_FIRSTFRAG
|
2857 SK_RXCTL_LASTFRAG
)) != (SK_RXCTL_STATUS_VALID
|
2858 SK_RXCTL_FIRSTFRAG
| SK_RXCTL_LASTFRAG
) ||
2859 SK_RXBYTES(sk_ctl
) < SK_MIN_FRAMELEN
||
2860 SK_RXBYTES(sk_ctl
) > SK_JUMBO_FRAMELEN
||
2861 sk_rxvalid(sc
, rxstat
, SK_RXBYTES(sk_ctl
)) == 0) {
2863 sk_discard_jumbo_rxbuf(sc_if
, cons
);
2868 csum
= le32toh(cur_rx
->sk_csum
);
2869 if (sk_jumbo_newbuf(sc_if
, cons
) != 0) {
2871 /* reuse old buffer */
2872 sk_discard_jumbo_rxbuf(sc_if
, cons
);
2875 m
->m_pkthdr
.rcvif
= ifp
;
2876 m
->m_pkthdr
.len
= m
->m_len
= SK_RXBYTES(sk_ctl
);
2878 if ((ifp
->if_capenable
& IFCAP_RXCSUM
) != 0)
2879 sk_rxcksum(ifp
, m
, csum
);
2880 SK_IF_UNLOCK(sc_if
);
2881 (*ifp
->if_input
)(ifp
, m
);
2886 sc_if
->sk_cdata
.sk_jumbo_rx_cons
= cons
;
2887 bus_dmamap_sync(sc_if
->sk_cdata
.sk_jumbo_rx_ring_tag
,
2888 sc_if
->sk_cdata
.sk_jumbo_rx_ring_map
,
2889 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
2895 struct sk_if_softc
*sc_if
;
2897 struct sk_txdesc
*txd
;
2898 struct sk_tx_desc
*cur_tx
;
2900 u_int32_t idx
, sk_ctl
;
2902 ifp
= sc_if
->sk_ifp
;
2904 txd
= STAILQ_FIRST(&sc_if
->sk_cdata
.sk_txbusyq
);
2907 bus_dmamap_sync(sc_if
->sk_cdata
.sk_tx_ring_tag
,
2908 sc_if
->sk_cdata
.sk_tx_ring_map
, BUS_DMASYNC_POSTREAD
);
2910 * Go through our tx ring and free mbufs for those
2911 * frames that have been sent.
2913 for (idx
= sc_if
->sk_cdata
.sk_tx_cons
;; SK_INC(idx
, SK_TX_RING_CNT
)) {
2914 if (sc_if
->sk_cdata
.sk_tx_cnt
<= 0)
2916 cur_tx
= &sc_if
->sk_rdata
.sk_tx_ring
[idx
];
2917 sk_ctl
= le32toh(cur_tx
->sk_ctl
);
2918 if (sk_ctl
& SK_TXCTL_OWN
)
2920 sc_if
->sk_cdata
.sk_tx_cnt
--;
2921 ifp
->if_drv_flags
&= ~IFF_DRV_OACTIVE
;
2922 if ((sk_ctl
& SK_TXCTL_LASTFRAG
) == 0)
2924 bus_dmamap_sync(sc_if
->sk_cdata
.sk_tx_tag
, txd
->tx_dmamap
,
2925 BUS_DMASYNC_POSTWRITE
);
2926 bus_dmamap_unload(sc_if
->sk_cdata
.sk_tx_tag
, txd
->tx_dmamap
);
2931 STAILQ_REMOVE_HEAD(&sc_if
->sk_cdata
.sk_txbusyq
, tx_q
);
2932 STAILQ_INSERT_TAIL(&sc_if
->sk_cdata
.sk_txfreeq
, txd
, tx_q
);
2933 txd
= STAILQ_FIRST(&sc_if
->sk_cdata
.sk_txbusyq
);
2935 sc_if
->sk_cdata
.sk_tx_cons
= idx
;
2936 sc_if
->sk_watchdog_timer
= sc_if
->sk_cdata
.sk_tx_cnt
> 0 ? 5 : 0;
2938 bus_dmamap_sync(sc_if
->sk_cdata
.sk_tx_ring_tag
,
2939 sc_if
->sk_cdata
.sk_tx_ring_map
,
2940 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
2947 struct sk_if_softc
*sc_if
;
2948 struct mii_data
*mii
;
2953 ifp
= sc_if
->sk_ifp
;
2954 mii
= device_get_softc(sc_if
->sk_miibus
);
2956 if (!(ifp
->if_flags
& IFF_UP
))
2959 if (sc_if
->sk_phytype
== SK_PHYTYPE_BCOM
) {
2960 sk_intr_bcom(sc_if
);
2965 * According to SysKonnect, the correct way to verify that
2966 * the link has come back up is to poll bit 0 of the GPIO
2967 * register three times. This pin has the signal from the
2968 * link_sync pin connected to it; if we read the same link
2969 * state 3 times in a row, we know the link is up.
2971 for (i
= 0; i
< 3; i
++) {
2972 if (SK_XM_READ_2(sc_if
, XM_GPIO
) & XM_GPIO_GP0_SET
)
2977 callout_reset(&sc_if
->sk_tick_ch
, hz
, sk_tick
, sc_if
);
2981 /* Turn the GP0 interrupt back on. */
2982 SK_XM_CLRBIT_2(sc_if
, XM_IMR
, XM_IMR_GP0_SET
);
2983 SK_XM_READ_2(sc_if
, XM_ISR
);
2985 callout_stop(&sc_if
->sk_tick_ch
);
2989 sk_yukon_tick(xsc_if
)
2992 struct sk_if_softc
*sc_if
;
2993 struct mii_data
*mii
;
2996 mii
= device_get_softc(sc_if
->sk_miibus
);
2999 callout_reset(&sc_if
->sk_tick_ch
, hz
, sk_yukon_tick
, sc_if
);
3004 struct sk_if_softc
*sc_if
;
3006 struct mii_data
*mii
;
3009 mii
= device_get_softc(sc_if
->sk_miibus
);
3010 ifp
= sc_if
->sk_ifp
;
3012 SK_XM_CLRBIT_2(sc_if
, XM_MMUCMD
, XM_MMUCMD_TX_ENB
|XM_MMUCMD_RX_ENB
);
3015 * Read the PHY interrupt register to make sure
3016 * we clear any pending interrupts.
3018 status
= sk_xmac_miibus_readreg(sc_if
, SK_PHYADDR_BCOM
, BRGPHY_MII_ISR
);
3020 if (!(ifp
->if_drv_flags
& IFF_DRV_RUNNING
)) {
3021 sk_init_xmac(sc_if
);
3025 if (status
& (BRGPHY_ISR_LNK_CHG
|BRGPHY_ISR_AN_PR
)) {
3027 lstat
= sk_xmac_miibus_readreg(sc_if
, SK_PHYADDR_BCOM
,
3030 if (!(lstat
& BRGPHY_AUXSTS_LINK
) && sc_if
->sk_link
) {
3032 /* Turn off the link LED. */
3033 SK_IF_WRITE_1(sc_if
, 0,
3034 SK_LINKLED1_CTL
, SK_LINKLED_OFF
);
3036 } else if (status
& BRGPHY_ISR_LNK_CHG
) {
3037 sk_xmac_miibus_writereg(sc_if
, SK_PHYADDR_BCOM
,
3038 BRGPHY_MII_IMR
, 0xFF00);
3041 /* Turn on the link LED. */
3042 SK_IF_WRITE_1(sc_if
, 0, SK_LINKLED1_CTL
,
3043 SK_LINKLED_ON
|SK_LINKLED_LINKSYNC_OFF
|
3044 SK_LINKLED_BLINK_OFF
);
3047 callout_reset(&sc_if
->sk_tick_ch
, hz
, sk_tick
, sc_if
);
3051 SK_XM_SETBIT_2(sc_if
, XM_MMUCMD
, XM_MMUCMD_TX_ENB
|XM_MMUCMD_RX_ENB
);
3058 struct sk_if_softc
*sc_if
;
3060 struct sk_softc
*sc
;
3063 sc
= sc_if
->sk_softc
;
3064 status
= SK_XM_READ_2(sc_if
, XM_ISR
);
3067 * Link has gone down. Start MII tick timeout to
3068 * watch for link resync.
3070 if (sc_if
->sk_phytype
== SK_PHYTYPE_XMAC
) {
3071 if (status
& XM_ISR_GP0_SET
) {
3072 SK_XM_SETBIT_2(sc_if
, XM_IMR
, XM_IMR_GP0_SET
);
3073 callout_reset(&sc_if
->sk_tick_ch
, hz
, sk_tick
, sc_if
);
3076 if (status
& XM_ISR_AUTONEG_DONE
) {
3077 callout_reset(&sc_if
->sk_tick_ch
, hz
, sk_tick
, sc_if
);
3081 if (status
& XM_IMR_TX_UNDERRUN
)
3082 SK_XM_SETBIT_4(sc_if
, XM_MODE
, XM_MODE_FLUSH_TXFIFO
);
3084 if (status
& XM_IMR_RX_OVERRUN
)
3085 SK_XM_SETBIT_4(sc_if
, XM_MODE
, XM_MODE_FLUSH_RXFIFO
);
3087 status
= SK_XM_READ_2(sc_if
, XM_ISR
);
3093 sk_intr_yukon(sc_if
)
3094 struct sk_if_softc
*sc_if
;
3098 status
= SK_IF_READ_1(sc_if
, 0, SK_GMAC_ISR
);
3100 if ((status
& SK_GMAC_INT_RX_OVER
) != 0) {
3101 SK_IF_WRITE_1(sc_if
, 0, SK_RXMF1_CTRL_TEST
,
3102 SK_RFCTL_RX_FIFO_OVER
);
3105 if ((status
& SK_GMAC_INT_TX_UNDER
) != 0) {
3106 SK_IF_WRITE_1(sc_if
, 0, SK_RXMF1_CTRL_TEST
,
3107 SK_TFCTL_TX_FIFO_UNDER
);
3115 struct sk_softc
*sc
= xsc
;
3116 struct sk_if_softc
*sc_if0
, *sc_if1
;
3117 struct ifnet
*ifp0
= NULL
, *ifp1
= NULL
;
3123 status
= CSR_READ_4(sc
, SK_ISSR
);
3124 if (status
== 0 || status
== 0xffffffff || sc
->sk_suspended
)
3128 sc_if0
= sc
->sk_if
[SK_PORT_A
];
3129 sc_if1
= sc
->sk_if
[SK_PORT_B
];
3132 ifp0
= sc_if0
->sk_ifp
;
3134 ifp1
= sc_if1
->sk_ifp
;
3137 for (; (status
&= sc
->sk_intrmask
) != 0;) {
3139 status
= atomic_get((int32
*)&sc
->sk_intstatus
);
3140 status
&= sc
->sk_intrmask
;
3143 if (status
== 0 || status
== 0xffffffff || sc
->sk_suspended
)
3147 /* Handle receive interrupts first. */
3148 if (status
& SK_ISR_RX1_EOF
) {
3149 if (ifp0
->if_mtu
> SK_MAX_FRAMELEN
)
3150 sk_jumbo_rxeof(sc_if0
);
3153 CSR_WRITE_4(sc
, SK_BMU_RX_CSR0
,
3154 SK_RXBMU_CLR_IRQ_EOF
|SK_RXBMU_RX_START
);
3156 if (status
& SK_ISR_RX2_EOF
) {
3157 if (ifp1
->if_mtu
> SK_MAX_FRAMELEN
)
3158 sk_jumbo_rxeof(sc_if1
);
3161 CSR_WRITE_4(sc
, SK_BMU_RX_CSR1
,
3162 SK_RXBMU_CLR_IRQ_EOF
|SK_RXBMU_RX_START
);
3165 /* Then transmit interrupts. */
3166 if (status
& SK_ISR_TX1_S_EOF
) {
3168 CSR_WRITE_4(sc
, SK_BMU_TXS_CSR0
, SK_TXBMU_CLR_IRQ_EOF
);
3170 if (status
& SK_ISR_TX2_S_EOF
) {
3172 CSR_WRITE_4(sc
, SK_BMU_TXS_CSR1
, SK_TXBMU_CLR_IRQ_EOF
);
3175 /* Then MAC interrupts. */
3176 if (status
& SK_ISR_MAC1
&&
3177 ifp0
->if_drv_flags
& IFF_DRV_RUNNING
) {
3178 if (sc
->sk_type
== SK_GENESIS
)
3179 sk_intr_xmac(sc_if0
);
3181 sk_intr_yukon(sc_if0
);
3184 if (status
& SK_ISR_MAC2
&&
3185 ifp1
->if_drv_flags
& IFF_DRV_RUNNING
) {
3186 if (sc
->sk_type
== SK_GENESIS
)
3187 sk_intr_xmac(sc_if1
);
3189 sk_intr_yukon(sc_if1
);
3192 if (status
& SK_ISR_EXTERNAL_REG
) {
3194 sc_if0
->sk_phytype
== SK_PHYTYPE_BCOM
)
3195 sk_intr_bcom(sc_if0
);
3197 sc_if1
->sk_phytype
== SK_PHYTYPE_BCOM
)
3198 sk_intr_bcom(sc_if1
);
3200 status
= CSR_READ_4(sc
, SK_ISSR
);
3202 if (((status
& sc
->sk_intrmask
) == 0) || status
== 0xffffffff ||
3209 CSR_WRITE_4(sc
, SK_IMR
, sc
->sk_intrmask
);
3211 if (ifp0
!= NULL
&& !IFQ_DRV_IS_EMPTY(&ifp0
->if_snd
))
3212 sk_start_locked(ifp0
);
3213 if (ifp1
!= NULL
&& !IFQ_DRV_IS_EMPTY(&ifp1
->if_snd
))
3214 sk_start_locked(ifp1
);
3222 struct sk_if_softc
*sc_if
;
3224 struct sk_softc
*sc
;
3226 u_int16_t eaddr
[(ETHER_ADDR_LEN
+1)/2];
3227 static const struct sk_bcom_hack bhack
[] = {
3228 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
3229 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
3230 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
3233 SK_IF_LOCK_ASSERT(sc_if
);
3235 sc
= sc_if
->sk_softc
;
3236 ifp
= sc_if
->sk_ifp
;
3238 /* Unreset the XMAC. */
3239 SK_IF_WRITE_2(sc_if
, 0, SK_TXF1_MACCTL
, SK_TXMACCTL_XMAC_UNRESET
);
3242 /* Reset the XMAC's internal state. */
3243 SK_XM_SETBIT_2(sc_if
, XM_GPIO
, XM_GPIO_RESETMAC
);
3245 /* Save the XMAC II revision */
3246 sc_if
->sk_xmac_rev
= XM_XMAC_REV(SK_XM_READ_4(sc_if
, XM_DEVID
));
3249 * Perform additional initialization for external PHYs,
3250 * namely for the 1000baseTX cards that use the XMAC's
3253 if (sc_if
->sk_phytype
== SK_PHYTYPE_BCOM
) {
3257 /* Take PHY out of reset. */
3258 val
= sk_win_read_4(sc
, SK_GPIO
);
3259 if (sc_if
->sk_port
== SK_PORT_A
)
3260 val
|= SK_GPIO_DIR0
|SK_GPIO_DAT0
;
3262 val
|= SK_GPIO_DIR2
|SK_GPIO_DAT2
;
3263 sk_win_write_4(sc
, SK_GPIO
, val
);
3265 /* Enable GMII mode on the XMAC. */
3266 SK_XM_SETBIT_2(sc_if
, XM_HWCFG
, XM_HWCFG_GMIIMODE
);
3268 sk_xmac_miibus_writereg(sc_if
, SK_PHYADDR_BCOM
,
3269 BRGPHY_MII_BMCR
, BRGPHY_BMCR_RESET
);
3271 sk_xmac_miibus_writereg(sc_if
, SK_PHYADDR_BCOM
,
3272 BRGPHY_MII_IMR
, 0xFFF0);
3275 * Early versions of the BCM5400 apparently have
3276 * a bug that requires them to have their reserved
3277 * registers initialized to some magic values. I don't
3278 * know what the numbers do, I'm just the messenger.
3280 if (sk_xmac_miibus_readreg(sc_if
, SK_PHYADDR_BCOM
, 0x03)
3282 while(bhack
[i
].reg
) {
3283 sk_xmac_miibus_writereg(sc_if
, SK_PHYADDR_BCOM
,
3284 bhack
[i
].reg
, bhack
[i
].val
);
3290 /* Set station address */
3291 bcopy(IF_LLADDR(sc_if
->sk_ifp
), eaddr
, ETHER_ADDR_LEN
);
3292 SK_XM_WRITE_2(sc_if
, XM_PAR0
, eaddr
[0]);
3293 SK_XM_WRITE_2(sc_if
, XM_PAR1
, eaddr
[1]);
3294 SK_XM_WRITE_2(sc_if
, XM_PAR2
, eaddr
[2]);
3295 SK_XM_SETBIT_4(sc_if
, XM_MODE
, XM_MODE_RX_USE_STATION
);
3297 if (ifp
->if_flags
& IFF_BROADCAST
) {
3298 SK_XM_CLRBIT_4(sc_if
, XM_MODE
, XM_MODE_RX_NOBROAD
);
3300 SK_XM_SETBIT_4(sc_if
, XM_MODE
, XM_MODE_RX_NOBROAD
);
3303 /* We don't need the FCS appended to the packet. */
3304 SK_XM_SETBIT_2(sc_if
, XM_RXCMD
, XM_RXCMD_STRIPFCS
);
3306 /* We want short frames padded to 60 bytes. */
3307 SK_XM_SETBIT_2(sc_if
, XM_TXCMD
, XM_TXCMD_AUTOPAD
);
3310 * Enable the reception of all error frames. This is
3311 * a necessary evil due to the design of the XMAC. The
3312 * XMAC's receive FIFO is only 8K in size, however jumbo
3313 * frames can be up to 9000 bytes in length. When bad
3314 * frame filtering is enabled, the XMAC's RX FIFO operates
3315 * in 'store and forward' mode. For this to work, the
3316 * entire frame has to fit into the FIFO, but that means
3317 * that jumbo frames larger than 8192 bytes will be
3318 * truncated. Disabling all bad frame filtering causes
3319 * the RX FIFO to operate in streaming mode, in which
3320 * case the XMAC will start transferring frames out of the
3321 * RX FIFO as soon as the FIFO threshold is reached.
3323 if (ifp
->if_mtu
> SK_MAX_FRAMELEN
) {
3324 SK_XM_SETBIT_4(sc_if
, XM_MODE
, XM_MODE_RX_BADFRAMES
|
3325 XM_MODE_RX_GIANTS
|XM_MODE_RX_RUNTS
|XM_MODE_RX_CRCERRS
|
3326 XM_MODE_RX_INRANGELEN
);
3327 SK_XM_SETBIT_2(sc_if
, XM_RXCMD
, XM_RXCMD_BIGPKTOK
);
3329 SK_XM_CLRBIT_2(sc_if
, XM_RXCMD
, XM_RXCMD_BIGPKTOK
);
3332 * Bump up the transmit threshold. This helps hold off transmit
3333 * underruns when we're blasting traffic from both ports at once.
3335 SK_XM_WRITE_2(sc_if
, XM_TX_REQTHRESH
, SK_XM_TX_FIFOTHRESH
);
3338 sk_rxfilter_genesis(sc_if
);
3340 /* Clear and enable interrupts */
3341 SK_XM_READ_2(sc_if
, XM_ISR
);
3342 if (sc_if
->sk_phytype
== SK_PHYTYPE_XMAC
)
3343 SK_XM_WRITE_2(sc_if
, XM_IMR
, XM_INTRS
);
3345 SK_XM_WRITE_2(sc_if
, XM_IMR
, 0xFFFF);
3347 /* Configure MAC arbiter */
3348 switch(sc_if
->sk_xmac_rev
) {
3349 case XM_XMAC_REV_B2
:
3350 sk_win_write_1(sc
, SK_RCINIT_RX1
, SK_RCINIT_XMAC_B2
);
3351 sk_win_write_1(sc
, SK_RCINIT_TX1
, SK_RCINIT_XMAC_B2
);
3352 sk_win_write_1(sc
, SK_RCINIT_RX2
, SK_RCINIT_XMAC_B2
);
3353 sk_win_write_1(sc
, SK_RCINIT_TX2
, SK_RCINIT_XMAC_B2
);
3354 sk_win_write_1(sc
, SK_MINIT_RX1
, SK_MINIT_XMAC_B2
);
3355 sk_win_write_1(sc
, SK_MINIT_TX1
, SK_MINIT_XMAC_B2
);
3356 sk_win_write_1(sc
, SK_MINIT_RX2
, SK_MINIT_XMAC_B2
);
3357 sk_win_write_1(sc
, SK_MINIT_TX2
, SK_MINIT_XMAC_B2
);
3358 sk_win_write_1(sc
, SK_RECOVERY_CTL
, SK_RECOVERY_XMAC_B2
);
3360 case XM_XMAC_REV_C1
:
3361 sk_win_write_1(sc
, SK_RCINIT_RX1
, SK_RCINIT_XMAC_C1
);
3362 sk_win_write_1(sc
, SK_RCINIT_TX1
, SK_RCINIT_XMAC_C1
);
3363 sk_win_write_1(sc
, SK_RCINIT_RX2
, SK_RCINIT_XMAC_C1
);
3364 sk_win_write_1(sc
, SK_RCINIT_TX2
, SK_RCINIT_XMAC_C1
);
3365 sk_win_write_1(sc
, SK_MINIT_RX1
, SK_MINIT_XMAC_C1
);
3366 sk_win_write_1(sc
, SK_MINIT_TX1
, SK_MINIT_XMAC_C1
);
3367 sk_win_write_1(sc
, SK_MINIT_RX2
, SK_MINIT_XMAC_C1
);
3368 sk_win_write_1(sc
, SK_MINIT_TX2
, SK_MINIT_XMAC_C1
);
3369 sk_win_write_1(sc
, SK_RECOVERY_CTL
, SK_RECOVERY_XMAC_B2
);
3374 sk_win_write_2(sc
, SK_MACARB_CTL
,
3375 SK_MACARBCTL_UNRESET
|SK_MACARBCTL_FASTOE_OFF
);
3383 sk_init_yukon(sc_if
)
3384 struct sk_if_softc
*sc_if
;
3388 struct sk_softc
*sc
;
3393 SK_IF_LOCK_ASSERT(sc_if
);
3395 sc
= sc_if
->sk_softc
;
3396 ifp
= sc_if
->sk_ifp
;
3398 if (sc
->sk_type
== SK_YUKON_LITE
&&
3399 sc
->sk_rev
>= SK_YUKON_LITE_REV_A3
) {
3401 * Workaround code for COMA mode, set PHY reset.
3402 * Otherwise it will not correctly take chip out of
3405 v
= sk_win_read_4(sc
, SK_GPIO
);
3406 v
|= SK_GPIO_DIR9
| SK_GPIO_DAT9
;
3407 sk_win_write_4(sc
, SK_GPIO
, v
);
3410 /* GMAC and GPHY Reset */
3411 SK_IF_WRITE_4(sc_if
, 0, SK_GPHY_CTRL
, SK_GPHY_RESET_SET
);
3412 SK_IF_WRITE_4(sc_if
, 0, SK_GMAC_CTRL
, SK_GMAC_RESET_SET
);
3415 if (sc
->sk_type
== SK_YUKON_LITE
&&
3416 sc
->sk_rev
>= SK_YUKON_LITE_REV_A3
) {
3418 * Workaround code for COMA mode, clear PHY reset
3420 v
= sk_win_read_4(sc
, SK_GPIO
);
3423 sk_win_write_4(sc
, SK_GPIO
, v
);
3426 phy
= SK_GPHY_INT_POL_HI
| SK_GPHY_DIS_FC
| SK_GPHY_DIS_SLEEP
|
3427 SK_GPHY_ENA_XC
| SK_GPHY_ANEG_ALL
| SK_GPHY_ENA_PAUSE
;
3429 if (sc
->sk_coppertype
)
3430 phy
|= SK_GPHY_COPPER
;
3432 phy
|= SK_GPHY_FIBER
;
3434 SK_IF_WRITE_4(sc_if
, 0, SK_GPHY_CTRL
, phy
| SK_GPHY_RESET_SET
);
3436 SK_IF_WRITE_4(sc_if
, 0, SK_GPHY_CTRL
, phy
| SK_GPHY_RESET_CLEAR
);
3437 SK_IF_WRITE_4(sc_if
, 0, SK_GMAC_CTRL
, SK_GMAC_LOOP_OFF
|
3438 SK_GMAC_PAUSE_ON
| SK_GMAC_RESET_CLEAR
);
3440 /* unused read of the interrupt source register */
3441 SK_IF_READ_2(sc_if
, 0, SK_GMAC_ISR
);
3443 reg
= SK_YU_READ_2(sc_if
, YUKON_PAR
);
3445 /* MIB Counter Clear Mode set */
3446 reg
|= YU_PAR_MIB_CLR
;
3447 SK_YU_WRITE_2(sc_if
, YUKON_PAR
, reg
);
3449 /* MIB Counter Clear Mode clear */
3450 reg
&= ~YU_PAR_MIB_CLR
;
3451 SK_YU_WRITE_2(sc_if
, YUKON_PAR
, reg
);
3453 /* receive control reg */
3454 SK_YU_WRITE_2(sc_if
, YUKON_RCR
, YU_RCR_CRCR
);
3456 /* transmit parameter register */
3457 SK_YU_WRITE_2(sc_if
, YUKON_TPR
, YU_TPR_JAM_LEN(0x3) |
3458 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
3460 /* serial mode register */
3461 reg
= YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN
| YU_SMR_IPG_DATA(0x1e);
3462 if (ifp
->if_mtu
> SK_MAX_FRAMELEN
)
3463 reg
|= YU_SMR_MFL_JUMBO
;
3464 SK_YU_WRITE_2(sc_if
, YUKON_SMR
, reg
);
3466 /* Setup Yukon's station address */
3467 eaddr
= IF_LLADDR(sc_if
->sk_ifp
);
3468 for (i
= 0; i
< 3; i
++)
3469 SK_YU_WRITE_2(sc_if
, SK_MAC0_0
+ i
* 4,
3470 eaddr
[i
* 2] | eaddr
[i
* 2 + 1] << 8);
3471 /* Set GMAC source address of flow control. */
3472 for (i
= 0; i
< 3; i
++)
3473 SK_YU_WRITE_2(sc_if
, YUKON_SAL1
+ i
* 4,
3474 eaddr
[i
* 2] | eaddr
[i
* 2 + 1] << 8);
3475 /* Set GMAC virtual address. */
3476 for (i
= 0; i
< 3; i
++)
3477 SK_YU_WRITE_2(sc_if
, YUKON_SAL2
+ i
* 4,
3478 eaddr
[i
* 2] | eaddr
[i
* 2 + 1] << 8);
3481 sk_rxfilter_yukon(sc_if
);
3483 /* enable interrupt mask for counter overflows */
3484 SK_YU_WRITE_2(sc_if
, YUKON_TIMR
, 0);
3485 SK_YU_WRITE_2(sc_if
, YUKON_RIMR
, 0);
3486 SK_YU_WRITE_2(sc_if
, YUKON_TRIMR
, 0);
3488 /* Configure RX MAC FIFO Flush Mask */
3489 v
= YU_RXSTAT_FOFL
| YU_RXSTAT_CRCERR
| YU_RXSTAT_MIIERR
|
3490 YU_RXSTAT_BADFC
| YU_RXSTAT_GOODFC
| YU_RXSTAT_RUNT
|
3492 SK_IF_WRITE_2(sc_if
, 0, SK_RXMF1_FLUSH_MASK
, v
);
3494 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
3495 if (sc
->sk_type
== SK_YUKON_LITE
&& sc
->sk_rev
== SK_YUKON_LITE_REV_A0
)
3496 v
= SK_TFCTL_OPERATION_ON
;
3498 v
= SK_TFCTL_OPERATION_ON
| SK_RFCTL_FIFO_FLUSH_ON
;
3499 /* Configure RX MAC FIFO */
3500 SK_IF_WRITE_1(sc_if
, 0, SK_RXMF1_CTRL_TEST
, SK_RFCTL_RESET_CLEAR
);
3501 SK_IF_WRITE_2(sc_if
, 0, SK_RXMF1_CTRL_TEST
, v
);
3503 /* Increase flush threshold to 64 bytes */
3504 SK_IF_WRITE_2(sc_if
, 0, SK_RXMF1_FLUSH_THRESHOLD
,
3505 SK_RFCTL_FIFO_THRESHOLD
+ 1);
3507 /* Configure TX MAC FIFO */
3508 SK_IF_WRITE_1(sc_if
, 0, SK_TXMF1_CTRL_TEST
, SK_TFCTL_RESET_CLEAR
);
3509 SK_IF_WRITE_2(sc_if
, 0, SK_TXMF1_CTRL_TEST
, SK_TFCTL_OPERATION_ON
);
3513 * Note that to properly initialize any part of the GEnesis chip,
3514 * you first have to take it out of reset mode.
3520 struct sk_if_softc
*sc_if
= xsc
;
3523 sk_init_locked(sc_if
);
3524 SK_IF_UNLOCK(sc_if
);
3530 sk_init_locked(sc_if
)
3531 struct sk_if_softc
*sc_if
;
3533 struct sk_softc
*sc
;
3535 struct mii_data
*mii
;
3540 SK_IF_LOCK_ASSERT(sc_if
);
3542 ifp
= sc_if
->sk_ifp
;
3543 sc
= sc_if
->sk_softc
;
3544 mii
= device_get_softc(sc_if
->sk_miibus
);
3546 if (ifp
->if_drv_flags
& IFF_DRV_RUNNING
)
3549 /* Cancel pending I/O and free all RX/TX buffers. */
3552 if (sc
->sk_type
== SK_GENESIS
) {
3553 /* Configure LINK_SYNC LED */
3554 SK_IF_WRITE_1(sc_if
, 0, SK_LINKLED1_CTL
, SK_LINKLED_ON
);
3555 SK_IF_WRITE_1(sc_if
, 0, SK_LINKLED1_CTL
,
3556 SK_LINKLED_LINKSYNC_ON
);
3558 /* Configure RX LED */
3559 SK_IF_WRITE_1(sc_if
, 0, SK_RXLED1_CTL
,
3560 SK_RXLEDCTL_COUNTER_START
);
3562 /* Configure TX LED */
3563 SK_IF_WRITE_1(sc_if
, 0, SK_TXLED1_CTL
,
3564 SK_TXLEDCTL_COUNTER_START
);
3568 * Configure descriptor poll timer
3570 * SK-NET GENESIS data sheet says that there is a possibility of losing Start
3571 * transmit command due to CPU/cache related interim storage problems
3572 * under certain conditions. The document recommends a polling
3573 * mechanism to send a Start transmit command to initiate transfer
3574 * of ready descriptors regulary. To cope with this issue sk(4) now
3575 * enables descriptor poll timer to initiate descriptor processing
3576 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
3577 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx
3578 * command instead of waiting for next descriptor polling time.
3579 * The same rule may apply to Rx side too but it seems that is not
3580 * needed at the moment.
3581 * Since sk(4) uses descriptor polling as a last resort there is no
3582 * need to set smaller polling time than maximum allowable one.
3584 SK_IF_WRITE_4(sc_if
, 0, SK_DPT_INIT
, SK_DPT_TIMER_MAX
);
3586 /* Configure I2C registers */
3588 /* Configure XMAC(s) */
3589 switch (sc
->sk_type
) {
3591 sk_init_xmac(sc_if
);
3596 sk_init_yukon(sc_if
);
3601 if (sc
->sk_type
== SK_GENESIS
) {
3602 /* Configure MAC FIFOs */
3603 SK_IF_WRITE_4(sc_if
, 0, SK_RXF1_CTL
, SK_FIFO_UNRESET
);
3604 SK_IF_WRITE_4(sc_if
, 0, SK_RXF1_END
, SK_FIFO_END
);
3605 SK_IF_WRITE_4(sc_if
, 0, SK_RXF1_CTL
, SK_FIFO_ON
);
3607 SK_IF_WRITE_4(sc_if
, 0, SK_TXF1_CTL
, SK_FIFO_UNRESET
);
3608 SK_IF_WRITE_4(sc_if
, 0, SK_TXF1_END
, SK_FIFO_END
);
3609 SK_IF_WRITE_4(sc_if
, 0, SK_TXF1_CTL
, SK_FIFO_ON
);
3612 /* Configure transmit arbiter(s) */
3613 SK_IF_WRITE_1(sc_if
, 0, SK_TXAR1_COUNTERCTL
,
3614 SK_TXARCTL_ON
|SK_TXARCTL_FSYNC_ON
);
3616 /* Configure RAMbuffers */
3617 SK_IF_WRITE_4(sc_if
, 0, SK_RXRB1_CTLTST
, SK_RBCTL_UNRESET
);
3618 SK_IF_WRITE_4(sc_if
, 0, SK_RXRB1_START
, sc_if
->sk_rx_ramstart
);
3619 SK_IF_WRITE_4(sc_if
, 0, SK_RXRB1_WR_PTR
, sc_if
->sk_rx_ramstart
);
3620 SK_IF_WRITE_4(sc_if
, 0, SK_RXRB1_RD_PTR
, sc_if
->sk_rx_ramstart
);
3621 SK_IF_WRITE_4(sc_if
, 0, SK_RXRB1_END
, sc_if
->sk_rx_ramend
);
3622 SK_IF_WRITE_4(sc_if
, 0, SK_RXRB1_CTLTST
, SK_RBCTL_ON
);
3624 SK_IF_WRITE_4(sc_if
, 1, SK_TXRBS1_CTLTST
, SK_RBCTL_UNRESET
);
3625 SK_IF_WRITE_4(sc_if
, 1, SK_TXRBS1_CTLTST
, SK_RBCTL_STORENFWD_ON
);
3626 SK_IF_WRITE_4(sc_if
, 1, SK_TXRBS1_START
, sc_if
->sk_tx_ramstart
);
3627 SK_IF_WRITE_4(sc_if
, 1, SK_TXRBS1_WR_PTR
, sc_if
->sk_tx_ramstart
);
3628 SK_IF_WRITE_4(sc_if
, 1, SK_TXRBS1_RD_PTR
, sc_if
->sk_tx_ramstart
);
3629 SK_IF_WRITE_4(sc_if
, 1, SK_TXRBS1_END
, sc_if
->sk_tx_ramend
);
3630 SK_IF_WRITE_4(sc_if
, 1, SK_TXRBS1_CTLTST
, SK_RBCTL_ON
);
3632 /* Configure BMUs */
3633 SK_IF_WRITE_4(sc_if
, 0, SK_RXQ1_BMU_CSR
, SK_RXBMU_ONLINE
);
3634 if (ifp
->if_mtu
> SK_MAX_FRAMELEN
) {
3635 SK_IF_WRITE_4(sc_if
, 0, SK_RXQ1_CURADDR_LO
,
3636 SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if
, 0)));
3637 SK_IF_WRITE_4(sc_if
, 0, SK_RXQ1_CURADDR_HI
,
3638 SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if
, 0)));
3640 SK_IF_WRITE_4(sc_if
, 0, SK_RXQ1_CURADDR_LO
,
3641 SK_ADDR_LO(SK_RX_RING_ADDR(sc_if
, 0)));
3642 SK_IF_WRITE_4(sc_if
, 0, SK_RXQ1_CURADDR_HI
,
3643 SK_ADDR_HI(SK_RX_RING_ADDR(sc_if
, 0)));
3646 SK_IF_WRITE_4(sc_if
, 1, SK_TXQS1_BMU_CSR
, SK_TXBMU_ONLINE
);
3647 SK_IF_WRITE_4(sc_if
, 1, SK_TXQS1_CURADDR_LO
,
3648 SK_ADDR_LO(SK_TX_RING_ADDR(sc_if
, 0)));
3649 SK_IF_WRITE_4(sc_if
, 1, SK_TXQS1_CURADDR_HI
,
3650 SK_ADDR_HI(SK_TX_RING_ADDR(sc_if
, 0)));
3652 /* Init descriptors */
3653 if (ifp
->if_mtu
> SK_MAX_FRAMELEN
)
3654 error
= sk_init_jumbo_rx_ring(sc_if
);
3656 error
= sk_init_rx_ring(sc_if
);
3658 device_printf(sc_if
->sk_if_dev
,
3659 "initialization failed: no memory for rx buffers\n");
3663 sk_init_tx_ring(sc_if
);
3665 /* Set interrupt moderation if changed via sysctl. */
3666 imr
= sk_win_read_4(sc
, SK_IMTIMERINIT
);
3667 if (imr
!= SK_IM_USECS(sc
->sk_int_mod
, sc
->sk_int_ticks
)) {
3668 sk_win_write_4(sc
, SK_IMTIMERINIT
, SK_IM_USECS(sc
->sk_int_mod
,
3671 device_printf(sc_if
->sk_if_dev
,
3672 "interrupt moderation is %d us.\n",
3676 /* Configure interrupt handling */
3677 CSR_READ_4(sc
, SK_ISSR
);
3678 if (sc_if
->sk_port
== SK_PORT_A
)
3679 sc
->sk_intrmask
|= SK_INTRS1
;
3681 sc
->sk_intrmask
|= SK_INTRS2
;
3683 sc
->sk_intrmask
|= SK_ISR_EXTERNAL_REG
;
3685 CSR_WRITE_4(sc
, SK_IMR
, sc
->sk_intrmask
);
3688 SK_IF_WRITE_4(sc_if
, 0, SK_RXQ1_BMU_CSR
, SK_RXBMU_RX_START
);
3690 switch(sc
->sk_type
) {
3692 /* Enable XMACs TX and RX state machines */
3693 SK_XM_CLRBIT_2(sc_if
, XM_MMUCMD
, XM_MMUCMD_IGNPAUSE
);
3694 SK_XM_SETBIT_2(sc_if
, XM_MMUCMD
, XM_MMUCMD_TX_ENB
|XM_MMUCMD_RX_ENB
);
3699 reg
= SK_YU_READ_2(sc_if
, YUKON_GPCR
);
3700 reg
|= YU_GPCR_TXEN
| YU_GPCR_RXEN
;
3702 /* XXX disable 100Mbps and full duplex mode? */
3703 reg
&= ~(YU_GPCR_SPEED
| YU_GPCR_DPLX_DIS
);
3705 SK_YU_WRITE_2(sc_if
, YUKON_GPCR
, reg
);
3708 /* Activate descriptor polling timer */
3709 SK_IF_WRITE_4(sc_if
, 0, SK_DPT_TIMER_CTRL
, SK_DPT_TCTL_START
);
3710 /* start transfer of Tx descriptors */
3711 CSR_WRITE_4(sc
, sc_if
->sk_tx_bmu
, SK_TXBMU_TX_START
);
3713 ifp
->if_drv_flags
|= IFF_DRV_RUNNING
;
3714 ifp
->if_drv_flags
&= ~IFF_DRV_OACTIVE
;
3716 switch (sc
->sk_type
) {
3720 callout_reset(&sc_if
->sk_tick_ch
, hz
, sk_yukon_tick
, sc_if
);
3724 callout_reset(&sc_if
->sk_watchdog_ch
, hz
, sk_watchdog
, ifp
);
3731 struct sk_if_softc
*sc_if
;
3734 struct sk_softc
*sc
;
3735 struct sk_txdesc
*txd
;
3736 struct sk_rxdesc
*rxd
;
3737 struct sk_rxdesc
*jrxd
;
3741 SK_IF_LOCK_ASSERT(sc_if
);
3742 sc
= sc_if
->sk_softc
;
3743 ifp
= sc_if
->sk_ifp
;
3745 callout_stop(&sc_if
->sk_tick_ch
);
3746 callout_stop(&sc_if
->sk_watchdog_ch
);
3748 /* stop Tx descriptor polling timer */
3749 SK_IF_WRITE_4(sc_if
, 0, SK_DPT_TIMER_CTRL
, SK_DPT_TCTL_STOP
);
3750 /* stop transfer of Tx descriptors */
3751 CSR_WRITE_4(sc
, sc_if
->sk_tx_bmu
, SK_TXBMU_TX_STOP
);
3752 for (i
= 0; i
< SK_TIMEOUT
; i
++) {
3753 val
= CSR_READ_4(sc
, sc_if
->sk_tx_bmu
);
3754 if ((val
& SK_TXBMU_TX_STOP
) == 0)
3758 if (i
== SK_TIMEOUT
)
3759 device_printf(sc_if
->sk_if_dev
,
3760 "can not stop transfer of Tx descriptor\n");
3761 /* stop transfer of Rx descriptors */
3762 SK_IF_WRITE_4(sc_if
, 0, SK_RXQ1_BMU_CSR
, SK_RXBMU_RX_STOP
);
3763 for (i
= 0; i
< SK_TIMEOUT
; i
++) {
3764 val
= SK_IF_READ_4(sc_if
, 0, SK_RXQ1_BMU_CSR
);
3765 if ((val
& SK_RXBMU_RX_STOP
) == 0)
3769 if (i
== SK_TIMEOUT
)
3770 device_printf(sc_if
->sk_if_dev
,
3771 "can not stop transfer of Rx descriptor\n");
3773 if (sc_if
->sk_phytype
== SK_PHYTYPE_BCOM
) {
3774 /* Put PHY back into reset. */
3775 val
= sk_win_read_4(sc
, SK_GPIO
);
3776 if (sc_if
->sk_port
== SK_PORT_A
) {
3777 val
|= SK_GPIO_DIR0
;
3778 val
&= ~SK_GPIO_DAT0
;
3780 val
|= SK_GPIO_DIR2
;
3781 val
&= ~SK_GPIO_DAT2
;
3783 sk_win_write_4(sc
, SK_GPIO
, val
);
3786 /* Turn off various components of this interface. */
3787 SK_XM_SETBIT_2(sc_if
, XM_GPIO
, XM_GPIO_RESETMAC
);
3788 switch (sc
->sk_type
) {
3790 SK_IF_WRITE_2(sc_if
, 0, SK_TXF1_MACCTL
, SK_TXMACCTL_XMAC_RESET
);
3791 SK_IF_WRITE_4(sc_if
, 0, SK_RXF1_CTL
, SK_FIFO_RESET
);
3796 SK_IF_WRITE_1(sc_if
,0, SK_RXMF1_CTRL_TEST
, SK_RFCTL_RESET_SET
);
3797 SK_IF_WRITE_1(sc_if
,0, SK_TXMF1_CTRL_TEST
, SK_TFCTL_RESET_SET
);
3800 SK_IF_WRITE_4(sc_if
, 0, SK_RXQ1_BMU_CSR
, SK_RXBMU_OFFLINE
);
3801 SK_IF_WRITE_4(sc_if
, 0, SK_RXRB1_CTLTST
, SK_RBCTL_RESET
|SK_RBCTL_OFF
);
3802 SK_IF_WRITE_4(sc_if
, 1, SK_TXQS1_BMU_CSR
, SK_TXBMU_OFFLINE
);
3803 SK_IF_WRITE_4(sc_if
, 1, SK_TXRBS1_CTLTST
, SK_RBCTL_RESET
|SK_RBCTL_OFF
);
3804 SK_IF_WRITE_1(sc_if
, 0, SK_TXAR1_COUNTERCTL
, SK_TXARCTL_OFF
);
3805 SK_IF_WRITE_1(sc_if
, 0, SK_RXLED1_CTL
, SK_RXLEDCTL_COUNTER_STOP
);
3806 SK_IF_WRITE_1(sc_if
, 0, SK_TXLED1_CTL
, SK_RXLEDCTL_COUNTER_STOP
);
3807 SK_IF_WRITE_1(sc_if
, 0, SK_LINKLED1_CTL
, SK_LINKLED_OFF
);
3808 SK_IF_WRITE_1(sc_if
, 0, SK_LINKLED1_CTL
, SK_LINKLED_LINKSYNC_OFF
);
3810 /* Disable interrupts */
3811 if (sc_if
->sk_port
== SK_PORT_A
)
3812 sc
->sk_intrmask
&= ~SK_INTRS1
;
3814 sc
->sk_intrmask
&= ~SK_INTRS2
;
3815 CSR_WRITE_4(sc
, SK_IMR
, sc
->sk_intrmask
);
3817 SK_XM_READ_2(sc_if
, XM_ISR
);
3818 SK_XM_WRITE_2(sc_if
, XM_IMR
, 0xFFFF);
3820 /* Free RX and TX mbufs still in the queues. */
3821 for (i
= 0; i
< SK_RX_RING_CNT
; i
++) {
3822 rxd
= &sc_if
->sk_cdata
.sk_rxdesc
[i
];
3823 if (rxd
->rx_m
!= NULL
) {
3824 bus_dmamap_sync(sc_if
->sk_cdata
.sk_rx_tag
,
3825 rxd
->rx_dmamap
, BUS_DMASYNC_POSTREAD
);
3826 bus_dmamap_unload(sc_if
->sk_cdata
.sk_rx_tag
,
3832 for (i
= 0; i
< SK_JUMBO_RX_RING_CNT
; i
++) {
3833 jrxd
= &sc_if
->sk_cdata
.sk_jumbo_rxdesc
[i
];
3834 if (jrxd
->rx_m
!= NULL
) {
3835 bus_dmamap_sync(sc_if
->sk_cdata
.sk_jumbo_rx_tag
,
3836 jrxd
->rx_dmamap
, BUS_DMASYNC_POSTREAD
);
3837 bus_dmamap_unload(sc_if
->sk_cdata
.sk_jumbo_rx_tag
,
3839 m_freem(jrxd
->rx_m
);
3843 for (i
= 0; i
< SK_TX_RING_CNT
; i
++) {
3844 txd
= &sc_if
->sk_cdata
.sk_txdesc
[i
];
3845 if (txd
->tx_m
!= NULL
) {
3846 bus_dmamap_sync(sc_if
->sk_cdata
.sk_tx_tag
,
3847 txd
->tx_dmamap
, BUS_DMASYNC_POSTWRITE
);
3848 bus_dmamap_unload(sc_if
->sk_cdata
.sk_tx_tag
,
3855 ifp
->if_drv_flags
&= ~(IFF_DRV_RUNNING
|IFF_DRV_OACTIVE
);
3861 sysctl_int_range(SYSCTL_HANDLER_ARGS
, int low
, int high
)
3867 value
= *(int *)arg1
;
3868 error
= sysctl_handle_int(oidp
, &value
, 0, req
);
3869 if (error
|| !req
->newptr
)
3871 if (value
< low
|| value
> high
)
3873 *(int *)arg1
= value
;
3878 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS
)
3880 return (sysctl_int_range(oidp
, arg1
, arg2
, req
, SK_IM_MIN
, SK_IM_MAX
));