/* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
 * the SK-984x series adapters, both single port and dual port.
 * References:
 *	The XaQti XMAC II datasheet,
 *	http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
 *
 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
 * convenience to others until Vitesse corrects this problem:
 *
 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Department of Electrical Engineering
 * Columbia University, New York City
 */

/*
 * The SysKonnect gigabit ethernet adapters consist of two main
 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
 * components and a PHY while the GEnesis controller provides a PCI
 * interface with DMA support. Each card may have between 512K and
 * 2MB of SRAM on board depending on the configuration.
 *
 * The SysKonnect GEnesis controller can have either one or two XMAC
 * chips connected to it, allowing single or dual port NIC configurations.
 * SysKonnect has the distinction of being the only vendor on the market
 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
 * XMAC registers. This driver takes advantage of these features to allow
 * both XMACs to operate as independent interfaces.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#if 0
#define SK_USEIOSPACE
#endif

#include <dev/sk/if_skreg.h>
#include <dev/sk/xmaciireg.h>
#include <dev/sk/yukonreg.h>

MODULE_DEPEND(sk, pci, 1, 1, 1);
MODULE_DEPEND(sk, ether, 1, 1, 1);
MODULE_DEPEND(sk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
static const struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_BELKIN_5005,
		"Belkin F5D5005 Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T_A1,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T_B1,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};
static int skc_probe(device_t);
static int skc_attach(device_t);
static int skc_detach(device_t);
static int skc_shutdown(device_t);
static int skc_suspend(device_t);
static int skc_resume(device_t);
static bus_dma_tag_t skc_get_dma_tag(device_t, device_t);
static int sk_detach(device_t);
static int sk_probe(device_t);
static int sk_attach(device_t);
static void sk_tick(void *);
static void sk_yukon_tick(void *);
static void sk_intr(void *);
static void sk_intr_xmac(struct sk_if_softc *);
static void sk_intr_bcom(struct sk_if_softc *);
static void sk_intr_yukon(struct sk_if_softc *);
static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
static void sk_rxeof(struct sk_if_softc *);
static void sk_jumbo_rxeof(struct sk_if_softc *);
static void sk_txeof(struct sk_if_softc *);
static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
static int sk_encap(struct sk_if_softc *, struct mbuf **);
static void sk_start(struct ifnet *);
static void sk_start_locked(struct ifnet *);
static int sk_ioctl(struct ifnet *, u_long, caddr_t);
static void sk_init(void *);
static void sk_init_locked(struct sk_if_softc *);
static void sk_init_xmac(struct sk_if_softc *);
static void sk_init_yukon(struct sk_if_softc *);
static void sk_stop(struct sk_if_softc *);
static void sk_watchdog(void *);
static int sk_ifmedia_upd(struct ifnet *);
static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void sk_reset(struct sk_softc *);
static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
static int sk_newbuf(struct sk_if_softc *, int);
static int sk_jumbo_newbuf(struct sk_if_softc *, int);
static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int sk_dma_alloc(struct sk_if_softc *);
static int sk_dma_jumbo_alloc(struct sk_if_softc *);
static void sk_dma_free(struct sk_if_softc *);
static void sk_dma_jumbo_free(struct sk_if_softc *);
static int sk_init_rx_ring(struct sk_if_softc *);
static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
static void sk_init_tx_ring(struct sk_if_softc *);
static u_int32_t sk_win_read_4(struct sk_softc *, int);
static u_int16_t sk_win_read_2(struct sk_softc *, int);
static u_int8_t sk_win_read_1(struct sk_softc *, int);
static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
static void sk_win_write_1(struct sk_softc *, int, u_int32_t);

static int sk_miibus_readreg(device_t, int, int);
static int sk_miibus_writereg(device_t, int, int, int);
static void sk_miibus_statchg(device_t);

static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
    int);
static void sk_xmac_miibus_statchg(struct sk_if_softc *);

static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
    int);
static void sk_marv_miibus_statchg(struct sk_if_softc *);

static uint32_t sk_xmchash(const uint8_t *);
static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
static void sk_rxfilter(struct sk_if_softc *);
static void sk_rxfilter_genesis(struct sk_if_softc *);
static void sk_rxfilter_yukon(struct sk_if_softc *);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);

/* Tunables. */
static int jumbo_disable = 0;
TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);

#ifdef __HAIKU__
static u_short in_addword(u_short a, u_short b);
#endif
/*
 * It seems that SK-NET GENESIS supports very simple checksum offload
 * capability for Tx and I believe it can generate 0 checksum value for
 * UDP packets in Tx as the hardware can't differentiate UDP packets from
 * TCP packets. A 0 checksum value for a UDP packet is invalid, as it
 * means the sender didn't perform checksum computation. For safety I
 * disabled UDP checksum offload capability at the moment. Alternatively
 * we can introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
 * offload routine.
 */
#define SK_CSUM_FEATURES	(CSUM_TCP)
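
/*
 * Illustration (added, not part of the original driver): RFC 768
 * reserves an all-zero UDP checksum to mean "no checksum computed";
 * a sender whose one's-complement sum works out to zero must transmit
 * it as 0xffff instead. Hardware that writes a computed 0x0000 into
 * the UDP checksum field thus emits frames that receivers may treat
 * as unchecksummed, which is why CSUM_UDP is omitted above.
 */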
/*
 * Note that we have newbus methods for both the GEnesis controller
 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
 * the miibus code is a child of the XMACs. We need to do it this way
 * so that the miibus drivers can access the PHY registers on the
 * right PHY. It's not quite what I had in mind, but it's the only
 * design that achieves the desired effect.
 */
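
/*
 * A sketch (added) of the resulting device tree on a dual port card,
 * assuming default unit numbering:
 *
 *	skc0 (GEnesis controller, PCI)
 *	+- sk0 (MAC, port A)
 *	|	+- miibus0 -> PHY for port A
 *	+- sk1 (MAC, port B)
 *		+- miibus1 -> PHY for port B
 */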
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_suspend,	skc_suspend),
	DEVMETHOD(device_resume,	skc_resume),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	DEVMETHOD(bus_get_dma_tag,	skc_get_dma_tag),

	DEVMETHOD_END
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	DEVMETHOD_END
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, NULL, NULL);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, NULL, NULL);

static struct resource_spec sk_res_spec_io[] = {
	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec sk_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
static u_int32_t
sk_win_read_4(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_4(sc, reg));
#endif
}

static u_int16_t
sk_win_read_2(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_2(sc, reg));
#endif
}

static u_int8_t
sk_win_read_1(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_1(sc, reg));
#endif
}

static void
sk_win_write_4(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_4(sc, reg, val);
#endif
	return;
}

static void
sk_win_write_2(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_2(sc, reg, val);
#endif
	return;
}

static void
sk_win_write_1(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_1(sc, reg, val);
#endif
	return;
}
#ifdef __HAIKU__
/* stole these from in_cksum.c */
#define ADDCARRY(x)  (x > 65535 ? x -= 65535 : x)
static u_short
in_addword(u_short a, u_short b)
{
	u_int64_t sum = a + b;

	ADDCARRY(sum);
	return (sum);
}
#endif
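
/*
 * Example (added): in one's-complement arithmetic the carry out of
 * bit 15 is folded back into the low word, so in_addword(0xffff,
 * 0x0001) first yields 0x10000 and ADDCARRY() folds it to 0x0001.
 */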
static int
sk_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct sk_if_softc	*sc_if;
	int			v;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		v = sk_xmac_miibus_readreg(sc_if, phy, reg);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		v = sk_marv_miibus_readreg(sc_if, phy, reg);
		break;
	default:
		v = 0;
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return (v);
}

static int
sk_miibus_writereg(dev, phy, reg, val)
	device_t		dev;
	int			phy, reg, val;
{
	struct sk_if_softc	*sc_if;
	int			v;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
		break;
	default:
		v = 0;
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return (v);
}

static void
sk_miibus_statchg(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		sk_xmac_miibus_statchg(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_marv_miibus_statchg(sc_if);
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return;
}
static int
sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	int			i;

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
			return(0);
		}
	}
	DELAY(1);
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);

	return(i);
}

static int
sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc	*sc_if;
	int			phy, reg, val;
{
	int			i;

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
		return (ETIMEDOUT);
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}
	if (i == SK_TIMEOUT)
		if_printf(sc_if->sk_ifp, "phy write timed out\n");

	return(0);
}

static void
sk_xmac_miibus_statchg(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;

	mii = device_get_softc(sc_if->sk_miibus);

	/*
	 * If this is a GMII PHY, manually set the XMAC's
	 * duplex mode accordingly.
	 */
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		} else {
			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		}
	}
}
static int
sk_marv_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	u_int16_t		val;
	int			i;

	if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	    sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) {
		return(0);
	}

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	return(val);
}

static int
sk_marv_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc	*sc_if;
	int			phy, reg, val;
{
	int			i;

	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
			break;
	}
	if (i == SK_TIMEOUT)
		if_printf(sc_if->sk_ifp, "phy write timeout\n");

	return(0);
}

static void
sk_marv_miibus_statchg(sc_if)
	struct sk_if_softc	*sc_if;
{
	return;
}
#define HASH_BITS		6

static u_int32_t
sk_xmchash(addr)
	const uint8_t *addr;
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	return (~crc & ((1 << HASH_BITS) - 1));
}
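
/*
 * Illustration (added): the XMAC multicast hash filter is a 64-bit
 * table written 32 bits at a time via XM_MAR0 and XM_MAR2 in
 * sk_rxfilter_genesis() below. The low HASH_BITS (6) bits of the
 * inverted little-endian CRC-32 select one of those 64 bits; a hash
 * value of 42, for example, sets bit 10 of the upper word.
 */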
static void
sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc	*sc_if;
	u_int16_t		*addr;
	int			slot;
{
	int			base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, addr[0]);
	SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
	SK_XM_WRITE_2(sc_if, base + 4, addr[2]);

	return;
}

static void
sk_rxfilter(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;

	SK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->sk_softc;
	if (sc->sk_type == SK_GENESIS)
		sk_rxfilter_genesis(sc_if);
	else
		sk_rxfilter_yukon(sc_if);
}
static void
sk_rxfilter_genesis(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ifnet		*ifp = sc_if->sk_ifp;
	u_int32_t		hashes[2] = { 0, 0 }, mode;
	int			h = 0, i;
	struct ifmultiaddr	*ifma;
	u_int16_t		dummy[] = { 0, 0, 0 };
	u_int16_t		maddr[(ETHER_ADDR_LEN+1)/2];

	SK_IF_LOCK_ASSERT(sc_if);

	mode = SK_XM_READ_4(sc_if, XM_MODE);
	mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
	    XM_MODE_RX_USE_PERFECT);
	/* First, zot all the existing perfect filters. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, dummy, i);

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		if (ifp->if_flags & IFF_ALLMULTI)
			mode |= XM_MODE_RX_USE_HASH;
		if (ifp->if_flags & IFF_PROMISC)
			mode |= XM_MODE_RX_PROMISC;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
		    ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter.
			 */
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    maddr, ETHER_ADDR_LEN);
			if (i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if, maddr, i);
				mode |= XM_MODE_RX_USE_PERFECT;
				i++;
				continue;
			}
			h = sk_xmchash((const uint8_t *)maddr);
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
			mode |= XM_MODE_RX_USE_HASH;
		}
		if_maddr_runlock(ifp);
	}

	SK_XM_WRITE_4(sc_if, XM_MODE, mode);
	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
}
static void
sk_rxfilter_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ifnet		*ifp;
	u_int32_t		crc, hashes[2] = { 0, 0 }, mode;
	struct ifmultiaddr	*ifma;

	SK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->sk_ifp;
	mode = SK_YU_READ_2(sc_if, YUKON_RCR);
	if (ifp->if_flags & IFF_PROMISC)
		mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		mode |= YU_RCR_UFLEN;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			hashes[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if_maddr_runlock(ifp);
		if (hashes[0] != 0 || hashes[1] != 0)
			mode |= YU_RCR_MUFLEN;
	}

	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_RCR, mode);
}
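
/*
 * Worked example (added): the Yukon path uses the 6 low bits of the
 * big-endian CRC-32 to index the 64-bit table in hashes[]. A value of
 * 0x2a (42) gives hashes[42 >> 5] |= 1 << (42 & 0x1f), i.e. bit 10 of
 * hashes[1], which lands in the YUKON_MCAH3/MCAH4 pair written above.
 */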
static int
sk_init_rx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_ring_data	*rd;
	bus_addr_t		addr;
	u_int32_t		csum_start;
	int			i;

	sc_if->sk_cdata.sk_rx_cons = 0;

	csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 |
	    ETHER_HDR_LEN;
	rd = &sc_if->sk_rdata;
	bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sk_newbuf(sc_if, i) != 0)
			return (ENOBUFS);
		if (i == (SK_RX_RING_CNT - 1))
			addr = SK_RX_RING_ADDR(sc_if, 0);
		else
			addr = SK_RX_RING_ADDR(sc_if, i + 1);
		rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
	}

	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return(0);
}
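
/*
 * Note (added, an inference from the field name): sk_csum_start packs
 * two 16-bit byte offsets for the receive checksum engine. The low
 * word (ETHER_HDR_LEN, 14) is where the first checksum starts, i.e.
 * the IP header; the high word (ETHER_HDR_LEN + sizeof(struct ip),
 * 34) is where the second starts, i.e. the TCP/UDP header, assuming
 * no IP options.
 */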
static int
sk_init_jumbo_rx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_ring_data	*rd;
	bus_addr_t		addr;
	u_int32_t		csum_start;
	int			i;

	sc_if->sk_cdata.sk_jumbo_rx_cons = 0;

	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
	    ETHER_HDR_LEN;
	rd = &sc_if->sk_rdata;
	bzero(rd->sk_jumbo_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
		if (sk_jumbo_newbuf(sc_if, i) != 0)
			return (ENOBUFS);
		if (i == (SK_JUMBO_RX_RING_CNT - 1))
			addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
		else
			addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
		rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
	}

	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
sk_init_tx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_ring_data	*rd;
	struct sk_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
	STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	rd = &sc_if->sk_rdata;
	bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (i == (SK_TX_RING_CNT - 1))
			addr = SK_TX_RING_ADDR(sc_if, 0);
		else
			addr = SK_TX_RING_ADDR(sc_if, i + 1);
		rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		txd = &sc_if->sk_cdata.sk_txdesc[i];
		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
	}

	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static __inline void
sk_discard_rxbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;

	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
	m = rxd->rx_m;
	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
}

static __inline void
sk_discard_jumbo_rxbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;

	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
}
static int
sk_newbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
	    sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
	sc_if->sk_cdata.sk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}
static int
sk_jumbo_newbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
	sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}
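
/*
 * Design note (added): both sk_newbuf() and sk_jumbo_newbuf() load
 * the fresh mbuf into a spare DMA map first and only swap it with the
 * descriptor's map once the load has succeeded. If
 * bus_dmamap_load_mbuf_sg() fails, the descriptor keeps its old,
 * still-mapped buffer, so the ring never contains an unmapped slot.
 */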
/*
 * Set media options.
 */
static int
sk_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc_if->sk_miibus);
	sk_init(sc_if);
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
sk_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;

	sc_if = ifp->if_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}
static int
sk_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error, mask;
	struct mii_data		*mii;

	error = 0;
	switch(command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if (sc_if->sk_jumbo_disable != 0 &&
			    ifr->ifr_mtu > SK_MAX_FRAMELEN)
				error = EINVAL;
			else {
				SK_IF_LOCK(sc_if);
				ifp->if_mtu = ifr->ifr_mtu;
				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					sk_init_locked(sc_if);
				}
				SK_IF_UNLOCK(sc_if);
			}
		}
		break;
	case SIOCSIFFLAGS:
		SK_IF_LOCK(sc_if);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc_if->sk_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI))
					sk_rxfilter(sc_if);
			} else
				sk_init_locked(sc_if);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				sk_stop(sc_if);
		}
		sc_if->sk_if_flags = ifp->if_flags;
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SK_IF_LOCK(sc_if);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			sk_rxfilter(sc_if);
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		SK_IF_LOCK(sc_if);
		if (sc_if->sk_softc->sk_type == SK_GENESIS) {
			SK_IF_UNLOCK(sc_if);
			break;
		}
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= SK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~SK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		SK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
skc_probe(dev)
	device_t		dev;
{
	const struct sk_type	*t = sk_devs;

	while(t->sk_name != NULL) {
		if ((pci_get_vendor(dev) == t->sk_vid) &&
		    (pci_get_device(dev) == t->sk_did)) {
			/*
			 * Only attach to rev. 2 of the Linksys EG1032 adapter.
			 * Rev. 3 is supported by re(4).
			 */
			if ((t->sk_vid == VENDORID_LINKSYS) &&
			    (t->sk_did == DEVICEID_LINKSYS_EG1032) &&
			    (pci_get_subdevice(dev) !=
			     SUBDEVICEID_LINKSYS_EG1032_REV2)) {
				t++;
				continue;
			}
			device_set_desc(dev, t->sk_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}
/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
static void
sk_reset(sc)
	struct sk_softc		*sc;
{

	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
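	/*
	 * Worked example (added): with the default of SK_IM_DEFAULT
	 * microseconds, the init register below is loaded with
	 * SK_IM_USECS(SK_IM_DEFAULT, sc->sk_int_ticks), presumably the
	 * timeout multiplied by the per-chip ticks-per-microsecond
	 * constant (SK_IMTIMER_TICKS_GENESIS or _YUKON), so doubling
	 * int_mod doubles how long the masked interrupts are deferred.
	 */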
	switch (sc->sk_type) {
	case SK_GENESIS:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	default:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
		break;
	}
	if (bootverbose)
		device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
		    sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
	    sc->sk_int_ticks));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
static int
sk_probe(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(device_get_parent(dev));

	/*
	 * Not much to do here. We always know there will be
	 * at least one XMAC present, and if there are two,
	 * skc_attach() will create a second device instance
	 * for us.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		device_set_desc(dev, "XaQti Corp. XMAC II");
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
		break;
	}

	return (BUS_PROBE_DEFAULT);
}
/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
static int
sk_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;
	u_int32_t		r;
	int			error, i, phy, port;
	u_char			eaddr[6];
	u_char			inv_mac[] = {0, 0, 0, 0, 0, 0};

	if (dev == NULL)
		return(EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	port = *(int *)device_get_ivars(dev);

	sc_if->sk_if_dev = dev;
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
	callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);

	if (sk_dma_alloc(sc_if) != 0) {
		error = ENOMEM;
		goto fail;
	}
	sk_dma_jumbo_alloc(sc_if);

	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * SK_GENESIS has a bug in checksum offload - From linux.
	 */
	if (sc_if->sk_softc->sk_type != SK_GENESIS) {
		ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
		ifp->if_hwassist = 0;
	} else {
		ifp->if_capabilities = 0;
		ifp->if_hwassist = 0;
	}
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Some revisions of the Yukon controller generate corrupted
	 * frames when TX checksum offloading is enabled. The frame
	 * has a valid checksum value, so the payload might have been
	 * modified during TX checksum calculation. Disable TX checksum
	 * offloading, but give users a chance to enable it when they
	 * know their controller works without problems with TX
	 * checksum offloading.
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_init = sk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	SK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/* Verify whether the station address is invalid or not. */
	if (bcmp(eaddr, inv_mac, sizeof(inv_mac)) == 0) {
		device_printf(sc_if->sk_if_dev,
		    "Generating random ethernet address\n");
		r = arc4random();
		/*
		 * Set OUI to convenient locally assigned address. 'b'
		 * is 0x62, which has the locally assigned bit set, and
		 * the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (r >> 16) & 0xff;
		eaddr[4] = (r >> 8) & 0xff;
		eaddr[5] = (r >> 0) & 0xff;
	}
	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 *
	 * Just to be contrary, Yukon2 appears to have separate memory
	 * for each MAC.
	 */
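	/*
	 * Worked example (added): on a single-MAC board with 512K of
	 * SRAM, chunk = 0x40000 bytes. Addresses are expressed in
	 * 8-byte (u_int64_t) units, so the receiver is assigned units
	 * [sk_rboff/8, sk_rboff/8 + 0x8000 - 1] and the transmitter
	 * the next 0x8000 units.
	 */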
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	if (!SK_YUKON_FAMILY(sc->sk_type)) {
		switch(sc_if->sk_phytype) {
		case SK_PHYTYPE_XMAC:
			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
			break;
		case SK_PHYTYPE_BCOM:
			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
			break;
		default:
			device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
			    sc_if->sk_phytype);
			error = ENODEV;
			SK_IF_UNLOCK(sc_if);
			goto fail;
		}
	} else {
		if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
		    sc->sk_pmd != 'S') {
			/* not initialized, punt */
			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
			sc->sk_coppertype = 1;
		}

		sc_if->sk_phyaddr = SK_PHYADDR_MARV;

		if (!(sc->sk_coppertype))
			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	SK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	SK_IF_LOCK(sc_if);

	/*
	 * The hardware should be ready for VLAN_MTU by default:
	 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
	 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	phy = MII_PHY_ANY;
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
			phy = 0;
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		phy = 0;
		break;
	}

	SK_IF_UNLOCK(sc_if);
	error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd,
	    sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error) {
		/* Access should be ok even though lock has been dropped */
		sc->sk_if[port] = NULL;
		sk_detach(dev);
	}

	return(error);
}
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
skc_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	int			error = 0, *port;
	uint8_t			skrs;
	const char		*pname = NULL;
	char			*revstr;

	sc = device_get_softc(dev);
	sc->sk_dev = dev;

	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate resources */
#ifdef SK_USEIOSPACE
	sc->sk_res_spec = sk_res_spec_io;
#else
	sc->sk_res_spec = sk_res_spec_mem;
#endif
	error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
	if (error) {
		if (sc->sk_res_spec == sk_res_spec_mem)
			sc->sk_res_spec = sk_res_spec_io;
		else
			sc->sk_res_spec = sk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->sk_res_spec == sk_res_spec_mem ? "memory" :
			    "I/O");
			goto fail;
		}
	}

	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;

	/* Bail out if chip is not recognized. */
	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
		device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
		    sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
	    &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
	    "SK interrupt moderation");

	/* Pull in device tunables. */
	sc->sk_int_mod = SK_IM_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_mod", &sc->sk_int_mod);
	if (error == 0) {
		if (sc->sk_int_mod < SK_IM_MIN ||
		    sc->sk_int_mod > SK_IM_MAX) {
			device_printf(dev, "int_mod value out of range; "
			    "using default: %d\n", SK_IM_DEFAULT);
			sc->sk_int_mod = SK_IM_DEFAULT;
		}
	}

	/* Reset the adapter. */
	sk_reset(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (sc->sk_type == SK_GENESIS) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			device_printf(dev, "unknown ram size: %d\n", skrs);
			error = ENXIO;
			goto fail;
		}
	} else { /* SK_YUKON_FAMILY */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	/* Read and save physical media type */
	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
		sc->sk_coppertype = 1;
	else
		sc->sk_coppertype = 0;
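	/*
	 * Note (added): the PMD type is an ASCII character read from
	 * the adapter; 'T' and '1' indicate twisted-pair (copper)
	 * media here, while other values (apparently fiber variants
	 * such as 'S' or 'L') are treated as fiber.
	 */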
	/* Determine whether to name it with VPD PN or just make it up.
	 * Marvell Yukon VPD PN seems to frequently be bogus. */
	switch (pci_get_device(dev)) {
	case DEVICEID_SK_V1:
	case DEVICEID_BELKIN_5005:
	case DEVICEID_3COM_3C940:
	case DEVICEID_LINKSYS_EG1032:
	case DEVICEID_DLINK_DGE530T_A1:
	case DEVICEID_DLINK_DGE530T_B1:
		/* Stay with VPD PN. */
		(void) pci_get_vpd_ident(dev, &pname);
		break;
	case DEVICEID_SK_V2:
		/* YUKON VPD PN might bear no resemblance to reality. */
		switch (sc->sk_type) {
		case SK_GENESIS:
			/* Stay with VPD PN. */
			(void) pci_get_vpd_ident(dev, &pname);
			break;
		case SK_YUKON:
			pname = "Marvell Yukon Gigabit Ethernet";
			break;
		case SK_YUKON_LITE:
			pname = "Marvell Yukon Lite Gigabit Ethernet";
			break;
		case SK_YUKON_LP:
			pname = "Marvell Yukon LP Gigabit Ethernet";
			break;
		default:
			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
			break;
		}

		/* Yukon Lite Rev. A0 needs special test. */
		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
			u_int32_t far;
			u_int8_t testbyte;

			/* Save flash address register before testing. */
			far = sk_win_read_4(sc, SK_EP_ADDR);

			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);

			if (testbyte != 0x00) {
				/* Yukon Lite Rev. A0 detected. */
				sc->sk_type = SK_YUKON_LITE;
				sc->sk_rev = SK_YUKON_LITE_REV_A0;
				/* Restore flash address register. */
				sk_win_write_4(sc, SK_EP_ADDR, far);
			}
		}
		break;
	default:
		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
		    "chipver=%02x, rev=%x\n",
		    pci_get_vendor(dev), pci_get_device(dev),
		    sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			revstr = "";
			break;
		}
	} else {
		revstr = "";
	}

	/* Announce the product name and more VPD data if there. */
	if (pname != NULL)
		device_printf(dev, "%s rev. %s(0x%x)\n",
		    pname, revstr, sc->sk_rev);

	if (bootverbose) {
		device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type);
		device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev);
		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
	}

	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
	if (sc->sk_devs[SK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	if (port == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	*port = SK_PORT_A;
	device_set_ivars(sc->sk_devs[SK_PORT_A], port);

	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
		if (sc->sk_devs[SK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
		if (port == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		*port = SK_PORT_B;
		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, sk_intr, sc, &sc->sk_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

fail:
	if (error)
		skc_detach(dev);

	return(error);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sk_detach(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
	    ("sk mutex not initialized in sk_detach"));
	SK_IF_LOCK(sc_if);

	ifp = sc_if->sk_ifp;
	/* These should only be active if attach_xmac succeeded */
	if (device_is_attached(dev)) {
		sk_stop(sc_if);
		/* Can't hold locks while calling detach */
		SK_IF_UNLOCK(sc_if);
		callout_drain(&sc_if->sk_tick_ch);
		callout_drain(&sc_if->sk_watchdog_ch);
		ether_ifdetach(ifp);
		SK_IF_LOCK(sc_if);
	}
	if (ifp)
		if_free(ifp);
	/*
	 * We're generally called from skc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 */
	/*
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	*/
	bus_generic_detach(dev);
	sk_dma_jumbo_free(sc_if);
	sk_dma_free(sc_if);
	SK_IF_UNLOCK(sc_if);

	return(0);
}
static int
skc_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->sk_devs[SK_PORT_A] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
		}
		if (sc->sk_devs[SK_PORT_B] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	if (sc->sk_intrhand)
		bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
	bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);

	mtx_destroy(&sc->sk_mii_mtx);
	mtx_destroy(&sc->sk_mtx);

	return(0);
}
static bus_dma_tag_t
skc_get_dma_tag(device_t bus, device_t child __unused)
{

	return (bus_get_dma_tag(bus));
}

struct sk_dmamap_arg {
	bus_addr_t	sk_busaddr;
};

static void
sk_dmamap_cb(arg, segs, nseg, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	int			error;
{
	struct sk_dmamap_arg	*ctx;

	if (error != 0)
		return;

	ctx = arg;
	ctx->sk_busaddr = segs[0].ds_addr;
}
/*
 * Allocate jumbo buffer storage. The SysKonnect adapters support
 * "jumbograms" (9K frames), although SysKonnect doesn't currently
 * use them in their drivers. In order for us to use them, we need
 * large 9K receive buffers, however standard mbuf clusters are only
 * 2048 bytes in size. Consequently, we need to allocate and manage
 * our own jumbo buffer pool. Fortunately, this does not require an
 * excessive amount of additional code.
 */
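
/*
 * Note (added): in this version of the driver the pool comes from the
 * mbuf allocator itself; sk_jumbo_newbuf() obtains 9K MJUM9BYTES
 * clusters via m_getjcl(), so no hand-rolled jumbo buffer management
 * remains here.
 */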
static int
sk_dma_alloc(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_dmamap_arg	ctx;
	struct sk_txdesc	*txd;
	struct sk_rxdesc	*rxd;
	int			error, i;

	/* create parent tag */
	/*
	 * XXX
	 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
	 * in bus_dma_tag_create(9) as the NIC would support DAC mode.
	 * However bz@ reported that it does not work on amd64 with > 4GB
	 * RAM. Until we have more clues of the breakage, disable DAC mode
	 * by limiting DMA address to be in 32bit address space.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc_if->sk_if_dev),	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->sk_cdata.sk_parent_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to create parent DMA tag\n");
		goto fail;
	}

	/* create tag for Tx ring */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
	    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SK_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    SK_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->sk_cdata.sk_tx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
	    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SK_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    SK_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->sk_cdata.sk_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * SK_MAXTXSEGS,	/* maxsize */
	    SK_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->sk_cdata.sk_tx_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->sk_cdata.sk_rx_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring */
	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
	    (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_tx_ring_map);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.sk_busaddr = 0;
	error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
	    SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring */
	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
	    (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.sk_busaddr = 0;
	error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
	    SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;

	/* create DMA maps for Tx buffers */
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		txd = &sc_if->sk_cdata.sk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc_if->sk_if_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}

	/* create DMA maps for Rx buffers */
	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
	    &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->sk_if_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}
2131 static int
2132 sk_dma_jumbo_alloc(sc_if)
2133 struct sk_if_softc *sc_if;
2135 struct sk_dmamap_arg ctx;
2136 struct sk_rxdesc *jrxd;
2137 int error, i;
2139 if (jumbo_disable != 0) {
2140 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
2141 sc_if->sk_jumbo_disable = 1;
2142 return (0);
2144 /* create tag for jumbo Rx ring */
2145 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2146 SK_RING_ALIGN, 0, /* algnmnt, boundary */
2147 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2148 BUS_SPACE_MAXADDR, /* highaddr */
2149 NULL, NULL, /* filter, filterarg */
2150 SK_JUMBO_RX_RING_SZ, /* maxsize */
2151 1, /* nsegments */
2152 SK_JUMBO_RX_RING_SZ, /* maxsegsize */
2153 0, /* flags */
2154 NULL, NULL, /* lockfunc, lockarg */
2155 &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2156 if (error != 0) {
2157 device_printf(sc_if->sk_if_dev,
2158 "failed to allocate jumbo Rx ring DMA tag\n");
2159 goto jumbo_fail;
2162 /* create tag for jumbo Rx buffers */
2163 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2164 1, 0, /* algnmnt, boundary */
2165 BUS_SPACE_MAXADDR, /* lowaddr */
2166 BUS_SPACE_MAXADDR, /* highaddr */
2167 NULL, NULL, /* filter, filterarg */
2168 MJUM9BYTES, /* maxsize */
2169 1, /* nsegments */
2170 MJUM9BYTES, /* maxsegsize */
2171 0, /* flags */
2172 NULL, NULL, /* lockfunc, lockarg */
2173 &sc_if->sk_cdata.sk_jumbo_rx_tag);
2174 if (error != 0) {
2175 device_printf(sc_if->sk_if_dev,
2176 "failed to allocate jumbo Rx DMA tag\n");
2177 goto jumbo_fail;
2180 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
2181 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2182 (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, BUS_DMA_NOWAIT |
2183 BUS_DMA_COHERENT | BUS_DMA_ZERO,
2184 &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2185 if (error != 0) {
2186 device_printf(sc_if->sk_if_dev,
2187 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2188 goto jumbo_fail;
2191 ctx.sk_busaddr = 0;
2192 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2193 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2194 sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
2195 &ctx, BUS_DMA_NOWAIT);
2196 if (error != 0) {
2197 device_printf(sc_if->sk_if_dev,
2198 "failed to load DMA'able memory for jumbo Rx ring\n");
2199 goto jumbo_fail;
2201 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
2203 /* create DMA maps for jumbo Rx buffers */
2204 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2205 &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
2206 device_printf(sc_if->sk_if_dev,
2207 "failed to create spare jumbo Rx dmamap\n");
2208 goto jumbo_fail;
2210 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2211 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2212 jrxd->rx_m = NULL;
2213 jrxd->rx_dmamap = NULL;
2214 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2215 &jrxd->rx_dmamap);
2216 if (error != 0) {
2217 device_printf(sc_if->sk_if_dev,
2218 "failed to create jumbo Rx dmamap\n");
2219 goto jumbo_fail;
2223 return (0);
2225 jumbo_fail:
2226 sk_dma_jumbo_free(sc_if);
2227 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
2228 "resource shortage\n");
2229 sc_if->sk_jumbo_disable = 1;
2230 return (0);
2233 static void
2234 sk_dma_free(sc_if)
2235 struct sk_if_softc *sc_if;
2237 struct sk_txdesc *txd;
2238 struct sk_rxdesc *rxd;
2239 int i;
2241 /* Tx ring */
2242 if (sc_if->sk_cdata.sk_tx_ring_tag) {
2243 if (sc_if->sk_cdata.sk_tx_ring_map)
2244 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
2245 sc_if->sk_cdata.sk_tx_ring_map);
2246 if (sc_if->sk_cdata.sk_tx_ring_map &&
2247 sc_if->sk_rdata.sk_tx_ring)
2248 bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
2249 sc_if->sk_rdata.sk_tx_ring,
2250 sc_if->sk_cdata.sk_tx_ring_map);
2251 sc_if->sk_rdata.sk_tx_ring = NULL;
2252 sc_if->sk_cdata.sk_tx_ring_map = NULL;
2253 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
2254 sc_if->sk_cdata.sk_tx_ring_tag = NULL;
2256 /* Rx ring */
2257 if (sc_if->sk_cdata.sk_rx_ring_tag) {
2258 if (sc_if->sk_cdata.sk_rx_ring_map)
2259 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
2260 sc_if->sk_cdata.sk_rx_ring_map);
2261 if (sc_if->sk_cdata.sk_rx_ring_map &&
2262 sc_if->sk_rdata.sk_rx_ring)
2263 bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
2264 sc_if->sk_rdata.sk_rx_ring,
2265 sc_if->sk_cdata.sk_rx_ring_map);
2266 sc_if->sk_rdata.sk_rx_ring = NULL;
2267 sc_if->sk_cdata.sk_rx_ring_map = NULL;
2268 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
2269 sc_if->sk_cdata.sk_rx_ring_tag = NULL;
2271 /* Tx buffers */
2272 if (sc_if->sk_cdata.sk_tx_tag) {
2273 for (i = 0; i < SK_TX_RING_CNT; i++) {
2274 txd = &sc_if->sk_cdata.sk_txdesc[i];
2275 if (txd->tx_dmamap) {
2276 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
2277 txd->tx_dmamap);
2278 txd->tx_dmamap = NULL;
2281 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
2282 sc_if->sk_cdata.sk_tx_tag = NULL;
2284 /* Rx buffers */
2285 if (sc_if->sk_cdata.sk_rx_tag) {
2286 for (i = 0; i < SK_RX_RING_CNT; i++) {
2287 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2288 if (rxd->rx_dmamap) {
2289 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2290 rxd->rx_dmamap);
2291 rxd->rx_dmamap = NULL;
2294 if (sc_if->sk_cdata.sk_rx_sparemap) {
2295 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2296 sc_if->sk_cdata.sk_rx_sparemap);
2297 sc_if->sk_cdata.sk_rx_sparemap = NULL;
2299 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
2300 sc_if->sk_cdata.sk_rx_tag = NULL;
2303 if (sc_if->sk_cdata.sk_parent_tag) {
2304 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
2305 sc_if->sk_cdata.sk_parent_tag = NULL;
2309 static void
2310 sk_dma_jumbo_free(sc_if)
2311 struct sk_if_softc *sc_if;
2313 struct sk_rxdesc *jrxd;
2314 int i;
2316 /* jumbo Rx ring */
2317 if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
2318 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
2319 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2320 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2321 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
2322 sc_if->sk_rdata.sk_jumbo_rx_ring)
2323 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2324 sc_if->sk_rdata.sk_jumbo_rx_ring,
2325 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2326 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
2327 sc_if->sk_cdata.sk_jumbo_rx_ring_map = NULL;
2328 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2329 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
2332 /* jumbo Rx buffers */
2333 if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
2334 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2335 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2336 if (jrxd->rx_dmamap) {
2337 bus_dmamap_destroy(
2338 sc_if->sk_cdata.sk_jumbo_rx_tag,
2339 jrxd->rx_dmamap);
2340 jrxd->rx_dmamap = NULL;
2343 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
2344 bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
2345 sc_if->sk_cdata.sk_jumbo_rx_sparemap);
2346 sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
2348 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
2349 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
2353 static void
2354 sk_txcksum(ifp, m, f)
2355 struct ifnet *ifp;
2356 struct mbuf *m;
2357 struct sk_tx_desc *f;
2359 struct ip *ip;
2360 u_int16_t offset;
2361 u_int8_t *p;
2363 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2364 for(; m && m->m_len == 0; m = m->m_next)
2366 if (m == NULL || m->m_len < ETHER_HDR_LEN) {
2367 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
2368 /* checksum may be corrupted */
2369 goto sendit;
2371 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
2372 if (m->m_len != ETHER_HDR_LEN) {
2373 if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
2374 __func__);
2375 /* checksum may be corrupted */
2376 goto sendit;
2378 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
2380 if (m == NULL) {
2381 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2382 /* checksum may be corrupted */
2383 goto sendit;
2385 ip = mtod(m, struct ip *);
2386 } else {
2387 p = mtod(m, u_int8_t *);
2388 p += ETHER_HDR_LEN;
2389 ip = (struct ip *)p;
2391 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
2393 sendit:
2394 f->sk_csum_startval = 0;
2395 f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
2396 (offset << 16));
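/*
 * A worked example of the encoding above (illustrative numbers): for a
 * plain IPv4 header (ip_hl == 5) the checksum region starts at offset
 * ETHER_HDR_LEN + 20 == 34 (0x22), and for TCP the stack sets
 * m_pkthdr.csum_data to 16, the offset of th_sum within the TCP
 * header. The descriptor word therefore packs to
 * (34 << 16) | (34 + 16), i.e. 0x00220032, before the htole32() swap.
 */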
2399 static int
2400 sk_encap(sc_if, m_head)
2401 struct sk_if_softc *sc_if;
2402 struct mbuf **m_head;
2404 struct sk_txdesc *txd;
2405 struct sk_tx_desc *f = NULL;
2406 struct mbuf *m;
2407 bus_dma_segment_t txsegs[SK_MAXTXSEGS];
2408 u_int32_t cflags, frag, si, sk_ctl;
2409 int error, i, nseg;
2411 SK_IF_LOCK_ASSERT(sc_if);
2413 if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
2414 return (ENOBUFS);
2416 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2417 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2418 if (error == EFBIG) {
2419 m = m_defrag(*m_head, M_NOWAIT);
2420 if (m == NULL) {
2421 m_freem(*m_head);
2422 *m_head = NULL;
2423 return (ENOMEM);
2425 *m_head = m;
2426 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2427 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2428 if (error != 0) {
2429 m_freem(*m_head);
2430 *m_head = NULL;
2431 return (error);
2433 } else if (error != 0)
2434 return (error);
2435 if (nseg == 0) {
2436 m_freem(*m_head);
2437 *m_head = NULL;
2438 return (EIO);
2440 if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
2441 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2442 return (ENOBUFS);
2445 m = *m_head;
2446 if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
2447 cflags = SK_OPCODE_CSUM;
2448 else
2449 cflags = SK_OPCODE_DEFAULT;
2450 si = frag = sc_if->sk_cdata.sk_tx_prod;
2451 for (i = 0; i < nseg; i++) {
2452 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2453 f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
2454 f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
2455 sk_ctl = txsegs[i].ds_len | cflags;
2456 if (i == 0) {
2457 if (cflags == SK_OPCODE_CSUM)
2458 sk_txcksum(sc_if->sk_ifp, m, f);
2459 sk_ctl |= SK_TXCTL_FIRSTFRAG;
2460 } else
2461 sk_ctl |= SK_TXCTL_OWN;
2462 f->sk_ctl = htole32(sk_ctl);
2463 sc_if->sk_cdata.sk_tx_cnt++;
2464 SK_INC(frag, SK_TX_RING_CNT);
2466 sc_if->sk_cdata.sk_tx_prod = frag;
2468 /* set EOF on the last descriptor */
2469 frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
2470 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2471 f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
2473 /* turn the first descriptor ownership to NIC */
2474 f = &sc_if->sk_rdata.sk_tx_ring[si];
2475 f->sk_ctl |= htole32(SK_TXCTL_OWN);
2477 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
2478 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
2479 txd->tx_m = m;
2481 /* sync descriptors */
2482 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2483 BUS_DMASYNC_PREWRITE);
2484 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2485 sc_if->sk_cdata.sk_tx_ring_map,
2486 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2488 return (0);
2491 static void
2492 sk_start(ifp)
2493 struct ifnet *ifp;
2495 struct sk_if_softc *sc_if;
2497 sc_if = ifp->if_softc;
2499 SK_IF_LOCK(sc_if);
2500 sk_start_locked(ifp);
2501 SK_IF_UNLOCK(sc_if);
2503 return;
2506 static void
2507 sk_start_locked(ifp)
2508 struct ifnet *ifp;
2510 struct sk_softc *sc;
2511 struct sk_if_softc *sc_if;
2512 struct mbuf *m_head;
2513 int enq;
2515 sc_if = ifp->if_softc;
2516 sc = sc_if->sk_softc;
2518 SK_IF_LOCK_ASSERT(sc_if);
2520 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2521 sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
2522 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2523 if (m_head == NULL)
2524 break;
2527 * Pack the data into the transmit ring. If we
2528 * don't have room, set the OACTIVE flag and wait
2529 * for the NIC to drain the ring.
2531 if (sk_encap(sc_if, &m_head)) {
2532 if (m_head == NULL)
2533 break;
2534 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2535 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2536 break;
2539 enq++;
2541 * If there's a BPF listener, bounce a copy of this frame
2542 * to him.
2544 BPF_MTAP(ifp, m_head);
2547 if (enq > 0) {
2548 /* Transmit */
2549 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2551 /* Set a timeout in case the chip goes out to lunch. */
2552 sc_if->sk_watchdog_timer = 5;
2557 static void
2558 sk_watchdog(arg)
2559 void *arg;
2561 struct sk_if_softc *sc_if;
2562 struct ifnet *ifp;
2564 ifp = arg;
2565 sc_if = ifp->if_softc;
2567 SK_IF_LOCK_ASSERT(sc_if);
2569 if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
2570 goto done;
2573 * Reclaim first as there is a possibility of losing Tx completion
2574 * interrupts.
2576 sk_txeof(sc_if);
2577 if (sc_if->sk_cdata.sk_tx_cnt != 0) {
2578 if_printf(sc_if->sk_ifp, "watchdog timeout\n");
2579 ifp->if_oerrors++;
2580 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2581 sk_init_locked(sc_if);
2584 done:
2585 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
2587 return;
2590 static int
2591 skc_shutdown(dev)
2592 device_t dev;
2594 struct sk_softc *sc;
2596 sc = device_get_softc(dev);
2597 SK_LOCK(sc);
2599 /* Turn off the 'driver is loaded' LED. */
2600 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2603 * Reset the GEnesis controller. Doing this should also
2604 * assert the resets on the attached XMAC(s).
2606 sk_reset(sc);
2607 SK_UNLOCK(sc);
2609 return (0);
2612 static int
2613 skc_suspend(dev)
2614 device_t dev;
2616 struct sk_softc *sc;
2617 struct sk_if_softc *sc_if0, *sc_if1;
2618 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2620 sc = device_get_softc(dev);
2622 SK_LOCK(sc);
2624 sc_if0 = sc->sk_if[SK_PORT_A];
2625 sc_if1 = sc->sk_if[SK_PORT_B];
2626 if (sc_if0 != NULL)
2627 ifp0 = sc_if0->sk_ifp;
2628 if (sc_if1 != NULL)
2629 ifp1 = sc_if1->sk_ifp;
2630 if (ifp0 != NULL)
2631 sk_stop(sc_if0);
2632 if (ifp1 != NULL)
2633 sk_stop(sc_if1);
2634 sc->sk_suspended = 1;
2636 SK_UNLOCK(sc);
2638 return (0);
2641 static int
2642 skc_resume(dev)
2643 device_t dev;
2645 struct sk_softc *sc;
2646 struct sk_if_softc *sc_if0, *sc_if1;
2647 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2649 sc = device_get_softc(dev);
2651 SK_LOCK(sc);
2653 sc_if0 = sc->sk_if[SK_PORT_A];
2654 sc_if1 = sc->sk_if[SK_PORT_B];
2655 if (sc_if0 != NULL)
2656 ifp0 = sc_if0->sk_ifp;
2657 if (sc_if1 != NULL)
2658 ifp1 = sc_if1->sk_ifp;
2659 if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
2660 sk_init_locked(sc_if0);
2661 if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
2662 sk_init_locked(sc_if1);
2663 sc->sk_suspended = 0;
2665 SK_UNLOCK(sc);
2667 return (0);
2671 * According to the SK-NET GENESIS data sheet, the hardware can compute
2672 * two Rx checksums at the same time (each checksum start position is
2673 * programmed in the Rx descriptors). However, TCP/UDP checksumming does
2674 * not seem to work, at least on my Yukon hardware. I tried every
2675 * possible way to get a correct checksum value but never got one. So
2676 * TCP/UDP checksum offload is disabled for now and only IP checksum
2677 * offload is enabled.
2678 * As the normal IP header size is 20 bytes, this can't be expected to
2679 * give an increase in throughput, but it doesn't hurt performance in
2680 * my testing either. If you have more detailed information on the
2681 * checksumming secrets of the hardware in question, please contact
2682 * yongari@FreeBSD.org to add TCP/UDP checksum offload support.
2684 static __inline void
2685 sk_rxcksum(ifp, m, csum)
2686 struct ifnet *ifp;
2687 struct mbuf *m;
2688 u_int32_t csum;
2690 struct ether_header *eh;
2691 struct ip *ip;
2692 int32_t hlen, len, pktlen;
2693 u_int16_t csum1, csum2, ipcsum;
2695 pktlen = m->m_pkthdr.len;
2696 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
2697 return;
2698 eh = mtod(m, struct ether_header *);
2699 if (eh->ether_type != htons(ETHERTYPE_IP))
2700 return;
2701 ip = (struct ip *)(eh + 1);
2702 if (ip->ip_v != IPVERSION)
2703 return;
2704 hlen = ip->ip_hl << 2;
2705 pktlen -= sizeof(struct ether_header);
2706 if (hlen < sizeof(struct ip))
2707 return;
2708 if (ntohs(ip->ip_len) < hlen)
2709 return;
2710 if (ntohs(ip->ip_len) != pktlen)
2711 return;
2713 csum1 = htons(csum & 0xffff);
2714 csum2 = htons((csum >> 16) & 0xffff);
2715 ipcsum = in_addword(csum1, ~csum2 & 0xffff);
2716 /* checksum fixup for IP options */
2717 len = hlen - sizeof(struct ip);
2718 if (len > 0) {
2720 * If the second checksum value were correct we could compute the
2721 * IP checksum with simple math. Unfortunately the second checksum
2722 * value is wrong, so we can't verify the checksum from it (it
2723 * seems some magic is needed to get a correct value). If the
2724 * second checksum value were correct it would also mean we could
2725 * get the TCP/UDP checksum here; however, that would still require
2726 * a pseudo header checksum calculation due to hardware
2727 * limitations.
2729 return;
2731 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2732 if (ipcsum == 0xffff)
2733 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
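/*
 * Spelling out the arithmetic above (as far as the hardware behavior
 * is understood): the chip returns two 16-bit one's complement sums in
 * 'csum', the low word presumably starting at the first programmed Rx
 * checksum position (the start of the IP header) and the high word 20
 * bytes later (the start of a minimal IP payload). csum1 then covers
 * header plus payload while csum2 covers only the payload, so
 * in_addword(csum1, ~csum2) cancels the payload contribution and
 * leaves just the header sum; a result of 0xffff means the IP header
 * checksum verified. With IP options the payload does not start where
 * csum2 assumes, which is one reason the code above bails out early.
 */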
2736 static __inline int
2737 sk_rxvalid(sc, stat, len)
2738 struct sk_softc *sc;
2739 u_int32_t stat, len;
2742 if (sc->sk_type == SK_GENESIS) {
2743 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
2744 XM_RXSTAT_BYTES(stat) != len)
2745 return (0);
2746 } else {
2747 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
2748 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
2749 YU_RXSTAT_JABBER)) != 0 ||
2750 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
2751 YU_RXSTAT_BYTES(stat) != len)
2752 return (0);
2755 return (1);
2758 static void
2759 sk_rxeof(sc_if)
2760 struct sk_if_softc *sc_if;
2762 struct sk_softc *sc;
2763 struct mbuf *m;
2764 struct ifnet *ifp;
2765 struct sk_rx_desc *cur_rx;
2766 struct sk_rxdesc *rxd;
2767 int cons, prog;
2768 u_int32_t csum, rxstat, sk_ctl;
2770 sc = sc_if->sk_softc;
2771 ifp = sc_if->sk_ifp;
2773 SK_IF_LOCK_ASSERT(sc_if);
2775 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2776 sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);
2778 prog = 0;
2779 for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
2780 prog++, SK_INC(cons, SK_RX_RING_CNT)) {
2781 cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
2782 sk_ctl = le32toh(cur_rx->sk_ctl);
2783 if ((sk_ctl & SK_RXCTL_OWN) != 0)
2784 break;
2785 rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
2786 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2788 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2789 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2790 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2791 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2792 SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
2793 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2794 ifp->if_ierrors++;
2795 sk_discard_rxbuf(sc_if, cons);
2796 continue;
2799 m = rxd->rx_m;
2800 csum = le32toh(cur_rx->sk_csum);
2801 if (sk_newbuf(sc_if, cons) != 0) {
2802 ifp->if_iqdrops++;
2803 /* reuse old buffer */
2804 sk_discard_rxbuf(sc_if, cons);
2805 continue;
2807 m->m_pkthdr.rcvif = ifp;
2808 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2809 ifp->if_ipackets++;
2810 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2811 sk_rxcksum(ifp, m, csum);
2812 SK_IF_UNLOCK(sc_if);
2813 (*ifp->if_input)(ifp, m);
2814 SK_IF_LOCK(sc_if);
2817 if (prog > 0) {
2818 sc_if->sk_cdata.sk_rx_cons = cons;
2819 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2820 sc_if->sk_cdata.sk_rx_ring_map,
2821 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2825 static void
2826 sk_jumbo_rxeof(sc_if)
2827 struct sk_if_softc *sc_if;
2829 struct sk_softc *sc;
2830 struct mbuf *m;
2831 struct ifnet *ifp;
2832 struct sk_rx_desc *cur_rx;
2833 struct sk_rxdesc *jrxd;
2834 int cons, prog;
2835 u_int32_t csum, rxstat, sk_ctl;
2837 sc = sc_if->sk_softc;
2838 ifp = sc_if->sk_ifp;
2840 SK_IF_LOCK_ASSERT(sc_if);
2842 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2843 sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);
2845 prog = 0;
2846 for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
2847 prog < SK_JUMBO_RX_RING_CNT;
2848 prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
2849 cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
2850 sk_ctl = le32toh(cur_rx->sk_ctl);
2851 if ((sk_ctl & SK_RXCTL_OWN) != 0)
2852 break;
2853 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
2854 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2856 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2857 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2858 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2859 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2860 SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
2861 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2862 ifp->if_ierrors++;
2863 sk_discard_jumbo_rxbuf(sc_if, cons);
2864 continue;
2867 m = jrxd->rx_m;
2868 csum = le32toh(cur_rx->sk_csum);
2869 if (sk_jumbo_newbuf(sc_if, cons) != 0) {
2870 ifp->if_iqdrops++;
2871 /* reuse old buffer */
2872 sk_discard_jumbo_rxbuf(sc_if, cons);
2873 continue;
2875 m->m_pkthdr.rcvif = ifp;
2876 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2877 ifp->if_ipackets++;
2878 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2879 sk_rxcksum(ifp, m, csum);
2880 SK_IF_UNLOCK(sc_if);
2881 (*ifp->if_input)(ifp, m);
2882 SK_IF_LOCK(sc_if);
2885 if (prog > 0) {
2886 sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
2887 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2888 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2889 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2893 static void
2894 sk_txeof(sc_if)
2895 struct sk_if_softc *sc_if;
2897 struct sk_txdesc *txd;
2898 struct sk_tx_desc *cur_tx;
2899 struct ifnet *ifp;
2900 u_int32_t idx, sk_ctl;
2902 ifp = sc_if->sk_ifp;
2904 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2905 if (txd == NULL)
2906 return;
2907 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2908 sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
2910 * Go through our tx ring and free mbufs for those
2911 * frames that have been sent.
2913 for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
2914 if (sc_if->sk_cdata.sk_tx_cnt <= 0)
2915 break;
2916 cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
2917 sk_ctl = le32toh(cur_tx->sk_ctl);
2918 if (sk_ctl & SK_TXCTL_OWN)
2919 break;
2920 sc_if->sk_cdata.sk_tx_cnt--;
2921 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2922 if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
2923 continue;
2924 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2925 BUS_DMASYNC_POSTWRITE);
2926 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2928 ifp->if_opackets++;
2929 m_freem(txd->tx_m);
2930 txd->tx_m = NULL;
2931 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
2932 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
2933 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2935 sc_if->sk_cdata.sk_tx_cons = idx;
2936 sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
2938 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2939 sc_if->sk_cdata.sk_tx_ring_map,
2940 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2943 static void
2944 sk_tick(xsc_if)
2945 void *xsc_if;
2947 struct sk_if_softc *sc_if;
2948 struct mii_data *mii;
2949 struct ifnet *ifp;
2950 int i;
2952 sc_if = xsc_if;
2953 ifp = sc_if->sk_ifp;
2954 mii = device_get_softc(sc_if->sk_miibus);
2956 if (!(ifp->if_flags & IFF_UP))
2957 return;
2959 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2960 sk_intr_bcom(sc_if);
2961 return;
2965 * According to SysKonnect, the correct way to verify that
2966 * the link has come back up is to poll bit 0 of the GPIO
2967 * register three times. This pin has the signal from the
2968 * link_sync pin connected to it; if we read the same link
2969 * state 3 times in a row, we know the link is up.
2971 for (i = 0; i < 3; i++) {
2972 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2973 break;
2976 if (i != 3) {
2977 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
2978 return;
2981 /* Turn the GP0 interrupt back on. */
2982 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2983 SK_XM_READ_2(sc_if, XM_ISR);
2984 mii_tick(mii);
2985 callout_stop(&sc_if->sk_tick_ch);
2988 static void
2989 sk_yukon_tick(xsc_if)
2990 void *xsc_if;
2992 struct sk_if_softc *sc_if;
2993 struct mii_data *mii;
2995 sc_if = xsc_if;
2996 mii = device_get_softc(sc_if->sk_miibus);
2998 mii_tick(mii);
2999 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3002 static void
3003 sk_intr_bcom(sc_if)
3004 struct sk_if_softc *sc_if;
3006 struct mii_data *mii;
3007 struct ifnet *ifp;
3008 int status;
3009 mii = device_get_softc(sc_if->sk_miibus);
3010 ifp = sc_if->sk_ifp;
3012 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3015 * Read the PHY interrupt register to make sure
3016 * we clear any pending interrupts.
3018 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
3020 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3021 sk_init_xmac(sc_if);
3022 return;
3025 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
3026 int lstat;
3027 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
3028 BRGPHY_MII_AUXSTS);
3030 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
3031 mii_mediachg(mii);
3032 /* Turn off the link LED. */
3033 SK_IF_WRITE_1(sc_if, 0,
3034 SK_LINKLED1_CTL, SK_LINKLED_OFF);
3035 sc_if->sk_link = 0;
3036 } else if (status & BRGPHY_ISR_LNK_CHG) {
3037 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3038 BRGPHY_MII_IMR, 0xFF00);
3039 mii_tick(mii);
3040 sc_if->sk_link = 1;
3041 /* Turn on the link LED. */
3042 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3043 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
3044 SK_LINKLED_BLINK_OFF);
3045 } else {
3046 mii_tick(mii);
3047 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3051 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3053 return;
3056 static void
3057 sk_intr_xmac(sc_if)
3058 struct sk_if_softc *sc_if;
3060 struct sk_softc *sc;
3061 u_int16_t status;
3063 sc = sc_if->sk_softc;
3064 status = SK_XM_READ_2(sc_if, XM_ISR);
3067 * Link has gone down. Start MII tick timeout to
3068 * watch for link resync.
3070 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
3071 if (status & XM_ISR_GP0_SET) {
3072 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
3073 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3076 if (status & XM_ISR_AUTONEG_DONE) {
3077 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3081 if (status & XM_IMR_TX_UNDERRUN)
3082 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
3084 if (status & XM_IMR_RX_OVERRUN)
3085 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
3087 status = SK_XM_READ_2(sc_if, XM_ISR);
3089 return;
3092 static void
3093 sk_intr_yukon(sc_if)
3094 struct sk_if_softc *sc_if;
3096 u_int8_t status;
3098 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
3099 /* RX overrun */
3100 if ((status & SK_GMAC_INT_RX_OVER) != 0) {
3101 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3102 SK_RFCTL_RX_FIFO_OVER);
3104 /* TX underrun */
3105 if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
3106 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
3107 SK_TFCTL_TX_FIFO_UNDER);
3111 static void
3112 sk_intr(xsc)
3113 void *xsc;
3115 struct sk_softc *sc = xsc;
3116 struct sk_if_softc *sc_if0, *sc_if1;
3117 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
3118 u_int32_t status;
3120 SK_LOCK(sc);
3122 #ifndef __HAIKU__
3123 status = CSR_READ_4(sc, SK_ISSR);
3124 if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3125 goto done_locked;
3126 #endif
3128 sc_if0 = sc->sk_if[SK_PORT_A];
3129 sc_if1 = sc->sk_if[SK_PORT_B];
3131 if (sc_if0 != NULL)
3132 ifp0 = sc_if0->sk_ifp;
3133 if (sc_if1 != NULL)
3134 ifp1 = sc_if1->sk_ifp;
3136 #ifndef __HAIKU__
3137 for (; (status &= sc->sk_intrmask) != 0;) {
3138 #else
3139 status = atomic_get((int32 *)&sc->sk_intstatus);
3140 status &= sc->sk_intrmask;
3141 while (true) {
3143 if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3144 goto done_locked;
3145 #endif
3147 /* Handle receive interrupts first. */
3148 if (status & SK_ISR_RX1_EOF) {
3149 if (ifp0->if_mtu > SK_MAX_FRAMELEN)
3150 sk_jumbo_rxeof(sc_if0);
3151 else
3152 sk_rxeof(sc_if0);
3153 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
3154 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3156 if (status & SK_ISR_RX2_EOF) {
3157 if (ifp1->if_mtu > SK_MAX_FRAMELEN)
3158 sk_jumbo_rxeof(sc_if1);
3159 else
3160 sk_rxeof(sc_if1);
3161 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
3162 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3165 /* Then transmit interrupts. */
3166 if (status & SK_ISR_TX1_S_EOF) {
3167 sk_txeof(sc_if0);
3168 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
3170 if (status & SK_ISR_TX2_S_EOF) {
3171 sk_txeof(sc_if1);
3172 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
3175 /* Then MAC interrupts. */
3176 if (status & SK_ISR_MAC1 &&
3177 ifp0->if_drv_flags & IFF_DRV_RUNNING) {
3178 if (sc->sk_type == SK_GENESIS)
3179 sk_intr_xmac(sc_if0);
3180 else
3181 sk_intr_yukon(sc_if0);
3184 if (status & SK_ISR_MAC2 &&
3185 ifp1->if_drv_flags & IFF_DRV_RUNNING) {
3186 if (sc->sk_type == SK_GENESIS)
3187 sk_intr_xmac(sc_if1);
3188 else
3189 sk_intr_yukon(sc_if1);
3192 if (status & SK_ISR_EXTERNAL_REG) {
3193 if (ifp0 != NULL &&
3194 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
3195 sk_intr_bcom(sc_if0);
3196 if (ifp1 != NULL &&
3197 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
3198 sk_intr_bcom(sc_if1);
3200 status = CSR_READ_4(sc, SK_ISSR);
3201 #ifdef __HAIKU__
3202 if (((status & sc->sk_intrmask) == 0) || status == 0xffffffff ||
3203 sc->sk_suspended) {
3204 break;
3206 #endif
3209 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3211 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3212 sk_start_locked(ifp0);
3213 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3214 sk_start_locked(ifp1);
3216 done_locked:
3217 SK_UNLOCK(sc);
3220 static void
3221 sk_init_xmac(sc_if)
3222 struct sk_if_softc *sc_if;
3224 struct sk_softc *sc;
3225 struct ifnet *ifp;
3226 u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2];
3227 static const struct sk_bcom_hack bhack[] = {
3228 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
3229 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
3230 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
3231 { 0, 0 } };
3233 SK_IF_LOCK_ASSERT(sc_if);
3235 sc = sc_if->sk_softc;
3236 ifp = sc_if->sk_ifp;
3238 /* Unreset the XMAC. */
3239 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
3240 DELAY(1000);
3242 /* Reset the XMAC's internal state. */
3243 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3245 /* Save the XMAC II revision */
3246 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
3249 * Perform additional initialization for external PHYs,
3250 * namely for the 1000baseTX cards that use the XMAC's
3251 * GMII mode.
3253 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3254 int i = 0;
3255 u_int32_t val;
3257 /* Take PHY out of reset. */
3258 val = sk_win_read_4(sc, SK_GPIO);
3259 if (sc_if->sk_port == SK_PORT_A)
3260 val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
3261 else
3262 val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
3263 sk_win_write_4(sc, SK_GPIO, val);
3265 /* Enable GMII mode on the XMAC. */
3266 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
3268 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3269 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
3270 DELAY(10000);
3271 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3272 BRGPHY_MII_IMR, 0xFFF0);
3275 * Early versions of the BCM5400 apparently have
3276 * a bug that requires them to have their reserved
3277 * registers initialized to some magic values. I don't
3278 * know what the numbers do, I'm just the messenger.
3280 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
3281 == 0x6041) {
3282 while(bhack[i].reg) {
3283 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3284 bhack[i].reg, bhack[i].val);
3285 i++;
3290 /* Set station address */
3291 bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
3292 SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
3293 SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
3294 SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
3295 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
3297 if (ifp->if_flags & IFF_BROADCAST) {
3298 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3299 } else {
3300 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3303 /* We don't need the FCS appended to the packet. */
3304 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
3306 /* We want short frames padded to 60 bytes. */
3307 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
3310 * Enable the reception of all error frames. This is
3311 * a necessary evil due to the design of the XMAC. The
3312 * XMAC's receive FIFO is only 8K in size, however jumbo
3313 * frames can be up to 9000 bytes in length. When bad
3314 * frame filtering is enabled, the XMAC's RX FIFO operates
3315 * in 'store and forward' mode. For this to work, the
3316 * entire frame has to fit into the FIFO, but that means
3317 * that jumbo frames larger than 8192 bytes will be
3318 * truncated. Disabling all bad frame filtering causes
3319 * the RX FIFO to operate in streaming mode, in which
3320 * case the XMAC will start transferring frames out of the
3321 * RX FIFO as soon as the FIFO threshold is reached.
3323 if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3324 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
3325 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
3326 XM_MODE_RX_INRANGELEN);
3327 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3328 } else
3329 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3332 * Bump up the transmit threshold. This helps hold off transmit
3333 * underruns when we're blasting traffic from both ports at once.
3335 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
3337 /* Set Rx filter */
3338 sk_rxfilter_genesis(sc_if);
3340 /* Clear and enable interrupts */
3341 SK_XM_READ_2(sc_if, XM_ISR);
3342 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
3343 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
3344 else
3345 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3347 /* Configure MAC arbiter */
3348 switch(sc_if->sk_xmac_rev) {
3349 case XM_XMAC_REV_B2:
3350 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
3351 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
3352 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
3353 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
3354 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
3355 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
3356 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
3357 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
3358 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3359 break;
3360 case XM_XMAC_REV_C1:
3361 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
3362 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
3363 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
3364 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
3365 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
3366 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
3367 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
3368 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
3369 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3370 break;
3371 default:
3372 break;
3374 sk_win_write_2(sc, SK_MACARB_CTL,
3375 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
3377 sc_if->sk_link = 1;
3379 return;
3382 static void
3383 sk_init_yukon(sc_if)
3384 struct sk_if_softc *sc_if;
3386 u_int32_t phy, v;
3387 u_int16_t reg;
3388 struct sk_softc *sc;
3389 struct ifnet *ifp;
3390 u_int8_t *eaddr;
3391 int i;
3393 SK_IF_LOCK_ASSERT(sc_if);
3395 sc = sc_if->sk_softc;
3396 ifp = sc_if->sk_ifp;
3398 if (sc->sk_type == SK_YUKON_LITE &&
3399 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3401 * Workaround code for COMA mode: set PHY reset.
3402 * Otherwise the chip will not correctly come out of
3403 * powerdown (coma).
3405 v = sk_win_read_4(sc, SK_GPIO);
3406 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
3407 sk_win_write_4(sc, SK_GPIO, v);
3410 /* GMAC and GPHY Reset */
3411 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
3412 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
3413 DELAY(1000);
3415 if (sc->sk_type == SK_YUKON_LITE &&
3416 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3418 * Workaround code for COMA mode: clear PHY reset.
3420 v = sk_win_read_4(sc, SK_GPIO);
3421 v |= SK_GPIO_DIR9;
3422 v &= ~SK_GPIO_DAT9;
3423 sk_win_write_4(sc, SK_GPIO, v);
3426 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
3427 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
3429 if (sc->sk_coppertype)
3430 phy |= SK_GPHY_COPPER;
3431 else
3432 phy |= SK_GPHY_FIBER;
3434 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
3435 DELAY(1000);
3436 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
3437 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
3438 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
3440 /* unused read of the interrupt source register */
3441 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
3443 reg = SK_YU_READ_2(sc_if, YUKON_PAR);
3445 /* MIB Counter Clear Mode set */
3446 reg |= YU_PAR_MIB_CLR;
3447 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3449 /* MIB Counter Clear Mode clear */
3450 reg &= ~YU_PAR_MIB_CLR;
3451 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3453 /* receive control reg */
3454 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
3456 /* transmit parameter register */
3457 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
3458 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
3460 /* serial mode register */
3461 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
3462 if (ifp->if_mtu > SK_MAX_FRAMELEN)
3463 reg |= YU_SMR_MFL_JUMBO;
3464 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
3466 /* Setup Yukon's station address */
3467 eaddr = IF_LLADDR(sc_if->sk_ifp);
3468 for (i = 0; i < 3; i++)
3469 SK_YU_WRITE_2(sc_if, SK_MAC0_0 + i * 4,
3470 eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3471 /* Set GMAC source address of flow control. */
3472 for (i = 0; i < 3; i++)
3473 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
3474 eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3475 /* Set GMAC virtual address. */
3476 for (i = 0; i < 3; i++)
3477 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4,
3478 eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3480 /* Set Rx filter */
3481 sk_rxfilter_yukon(sc_if);
3483 /* enable interrupt mask for counter overflows */
3484 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
3485 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
3486 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
3488 /* Configure RX MAC FIFO Flush Mask */
3489 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
3490 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
3491 YU_RXSTAT_JABBER;
3492 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
3494 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
3495 if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
3496 v = SK_TFCTL_OPERATION_ON;
3497 else
3498 v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
3499 /* Configure RX MAC FIFO */
3500 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
3501 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
3503 /* Increase flush threshold to 64 bytes */
3504 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
3505 SK_RFCTL_FIFO_THRESHOLD + 1);
3507 /* Configure TX MAC FIFO */
3508 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
3509 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
3513 * Note that to properly initialize any part of the GEnesis chip,
3514 * you first have to take it out of reset mode.
3516 static void
3517 sk_init(xsc)
3518 void *xsc;
3520 struct sk_if_softc *sc_if = xsc;
3522 SK_IF_LOCK(sc_if);
3523 sk_init_locked(sc_if);
3524 SK_IF_UNLOCK(sc_if);
3526 return;
3529 static void
3530 sk_init_locked(sc_if)
3531 struct sk_if_softc *sc_if;
3533 struct sk_softc *sc;
3534 struct ifnet *ifp;
3535 struct mii_data *mii;
3536 u_int16_t reg;
3537 u_int32_t imr;
3538 int error;
3540 SK_IF_LOCK_ASSERT(sc_if);
3542 ifp = sc_if->sk_ifp;
3543 sc = sc_if->sk_softc;
3544 mii = device_get_softc(sc_if->sk_miibus);
3546 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3547 return;
3549 /* Cancel pending I/O and free all RX/TX buffers. */
3550 sk_stop(sc_if);
3552 if (sc->sk_type == SK_GENESIS) {
3553 /* Configure LINK_SYNC LED */
3554 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
3555 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3556 SK_LINKLED_LINKSYNC_ON);
3558 /* Configure RX LED */
3559 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
3560 SK_RXLEDCTL_COUNTER_START);
3562 /* Configure TX LED */
3563 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
3564 SK_TXLEDCTL_COUNTER_START);
3568 * Configure descriptor poll timer.
3570 * The SK-NET GENESIS data sheet mentions the possibility of losing the
3571 * Start transmit command due to CPU/cache related interim storage
3572 * problems under certain conditions. The document recommends a polling
3573 * mechanism that sends a Start transmit command regularly to initiate
3574 * the transfer of ready descriptors. To cope with this issue sk(4) now
3575 * enables the descriptor poll timer to initiate descriptor processing
3576 * periodically, as defined by SK_DPT_TIMER_MAX. However, sk(4) still
3577 * issues SK_TXBMU_TX_START to the Tx BMU to get fast execution of the
3578 * Tx command instead of waiting for the next descriptor polling time.
3579 * The same rule may apply to the Rx side too, but it seems that is not
3580 * needed at the moment.
3581 * Since sk(4) uses descriptor polling only as a last resort there is no
3582 * need to set a polling time smaller than the maximum allowable one.
3584 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
3586 /* Configure I2C registers */
3588 /* Configure XMAC(s) */
3589 switch (sc->sk_type) {
3590 case SK_GENESIS:
3591 sk_init_xmac(sc_if);
3592 break;
3593 case SK_YUKON:
3594 case SK_YUKON_LITE:
3595 case SK_YUKON_LP:
3596 sk_init_yukon(sc_if);
3597 break;
3599 mii_mediachg(mii);
3601 if (sc->sk_type == SK_GENESIS) {
3602 /* Configure MAC FIFOs */
3603 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
3604 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
3605 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
3607 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
3608 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
3609 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
3612 /* Configure transmit arbiter(s) */
3613 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
3614 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
3616 /* Configure RAMbuffers */
3617 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
3618 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
3619 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
3620 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
3621 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
3622 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
3624 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
3625 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
3626 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
3627 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
3628 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
3629 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
3630 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
3632 /* Configure BMUs */
3633 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
3634 if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3635 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3636 SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3637 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3638 SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3639 } else {
3640 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3641 SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
3642 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3643 SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
3646 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
3647 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
3648 SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
3649 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
3650 SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));
3652 /* Init descriptors */
3653 if (ifp->if_mtu > SK_MAX_FRAMELEN)
3654 error = sk_init_jumbo_rx_ring(sc_if);
3655 else
3656 error = sk_init_rx_ring(sc_if);
3657 if (error != 0) {
3658 device_printf(sc_if->sk_if_dev,
3659 "initialization failed: no memory for rx buffers\n");
3660 sk_stop(sc_if);
3661 return;
3663 sk_init_tx_ring(sc_if);
3665 /* Set interrupt moderation if changed via sysctl. */
3666 imr = sk_win_read_4(sc, SK_IMTIMERINIT);
3667 if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
3668 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
3669 sc->sk_int_ticks));
3670 if (bootverbose)
3671 device_printf(sc_if->sk_if_dev,
3672 "interrupt moderation is %d us.\n",
3673 sc->sk_int_mod);
3676 /* Configure interrupt handling */
3677 CSR_READ_4(sc, SK_ISSR);
3678 if (sc_if->sk_port == SK_PORT_A)
3679 sc->sk_intrmask |= SK_INTRS1;
3680 else
3681 sc->sk_intrmask |= SK_INTRS2;
3683 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
3685 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3687 /* Start BMUs. */
3688 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
3690 switch(sc->sk_type) {
3691 case SK_GENESIS:
3692 /* Enable XMACs TX and RX state machines */
3693 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
3694 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3695 break;
3696 case SK_YUKON:
3697 case SK_YUKON_LITE:
3698 case SK_YUKON_LP:
3699 reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
3700 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
3701 #if 0
3702 /* XXX disable 100Mbps and full duplex mode? */
3703 reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
3704 #endif
3705 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
3708 /* Activate descriptor polling timer */
3709 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
3710 /* start transfer of Tx descriptors */
3711 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
3713 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3714 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3716 switch (sc->sk_type) {
3717 case SK_YUKON:
3718 case SK_YUKON_LITE:
3719 case SK_YUKON_LP:
3720 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3721 break;
3724 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
3726 return;
3729 static void
3730 sk_stop(sc_if)
3731 struct sk_if_softc *sc_if;
3733 int i;
3734 struct sk_softc *sc;
3735 struct sk_txdesc *txd;
3736 struct sk_rxdesc *rxd;
3737 struct sk_rxdesc *jrxd;
3738 struct ifnet *ifp;
3739 u_int32_t val;
3741 SK_IF_LOCK_ASSERT(sc_if);
3742 sc = sc_if->sk_softc;
3743 ifp = sc_if->sk_ifp;
3745 callout_stop(&sc_if->sk_tick_ch);
3746 callout_stop(&sc_if->sk_watchdog_ch);
3748 /* stop Tx descriptor polling timer */
3749 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
3750 /* stop transfer of Tx descriptors */
3751 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
3752 for (i = 0; i < SK_TIMEOUT; i++) {
3753 val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
3754 if ((val & SK_TXBMU_TX_STOP) == 0)
3755 break;
3756 DELAY(1);
3758 if (i == SK_TIMEOUT)
3759 device_printf(sc_if->sk_if_dev,
3760 "can not stop transfer of Tx descriptor\n");
3761 /* stop transfer of Rx descriptors */
3762 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
3763 for (i = 0; i < SK_TIMEOUT; i++) {
3764 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
3765 if ((val & SK_RXBMU_RX_STOP) == 0)
3766 break;
3767 DELAY(1);
3769 if (i == SK_TIMEOUT)
3770 device_printf(sc_if->sk_if_dev,
3771 "can not stop transfer of Rx descriptor\n");
3773 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3774 /* Put PHY back into reset. */
3775 val = sk_win_read_4(sc, SK_GPIO);
3776 if (sc_if->sk_port == SK_PORT_A) {
3777 val |= SK_GPIO_DIR0;
3778 val &= ~SK_GPIO_DAT0;
3779 } else {
3780 val |= SK_GPIO_DIR2;
3781 val &= ~SK_GPIO_DAT2;
3783 sk_win_write_4(sc, SK_GPIO, val);
3786 /* Turn off various components of this interface. */
3787 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3788 switch (sc->sk_type) {
3789 case SK_GENESIS:
3790 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
3791 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
3792 break;
3793 case SK_YUKON:
3794 case SK_YUKON_LITE:
3795 case SK_YUKON_LP:
3796 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
3797 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
3798 break;
3800 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
3801 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3802 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
3803 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3804 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
3805 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3806 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3807 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
3808 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
3810 /* Disable interrupts */
3811 if (sc_if->sk_port == SK_PORT_A)
3812 sc->sk_intrmask &= ~SK_INTRS1;
3813 else
3814 sc->sk_intrmask &= ~SK_INTRS2;
3815 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3817 SK_XM_READ_2(sc_if, XM_ISR);
3818 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3820 /* Free RX and TX mbufs still in the queues. */
3821 for (i = 0; i < SK_RX_RING_CNT; i++) {
3822 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
3823 if (rxd->rx_m != NULL) {
3824 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
3825 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3826 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
3827 rxd->rx_dmamap);
3828 m_freem(rxd->rx_m);
3829 rxd->rx_m = NULL;
3832 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
3833 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
3834 if (jrxd->rx_m != NULL) {
3835 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
3836 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3837 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
3838 jrxd->rx_dmamap);
3839 m_freem(jrxd->rx_m);
3840 jrxd->rx_m = NULL;
3843 for (i = 0; i < SK_TX_RING_CNT; i++) {
3844 txd = &sc_if->sk_cdata.sk_txdesc[i];
3845 if (txd->tx_m != NULL) {
3846 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
3847 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3848 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
3849 txd->tx_dmamap);
3850 m_freem(txd->tx_m);
3851 txd->tx_m = NULL;
3855 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
3857 return;
3860 static int
3861 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3863 int error, value;
3865 if (!arg1)
3866 return (EINVAL);
3867 value = *(int *)arg1;
3868 error = sysctl_handle_int(oidp, &value, 0, req);
3869 if (error || !req->newptr)
3870 return (error);
3871 if (value < low || value > high)
3872 return (EINVAL);
3873 *(int *)arg1 = value;
3874 return (0);
3877 static int
3878 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3880 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
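/*
 * For context, a sketch of how these handlers are typically hooked up
 * at attach time (hypothetical registration; the actual SYSCTL_ADD_PROC
 * call lives in skc_attach(), outside this part of the file):
 */
#if 0
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->sk_int_mod, 0,
	    sysctl_hw_sk_int_mod, "I", "SK interrupt moderation");
#endif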