1 /* $NetBSD: rtl81x9.c,v 1.86 2009/04/27 14:52:50 tsutsui Exp $ */
3 /*
4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
34 * FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp
38 * RealTek 8129/8139 PCI NIC driver
40 * Supports several extremely cheap PCI 10/100 adapters based on
41 * the RealTek chipset. Datasheets can be obtained from
42 * www.realtek.com.tw.
44 * Written by Bill Paul <wpaul@ctr.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
50 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
51 * probably the worst PCI ethernet controller ever made, with the possible
52 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
53 * DMA, but it has a terrible interface that nullifies any performance
54 * gains that bus-master DMA usually offers.
56 * For transmission, the chip offers a series of four TX descriptor
57 * registers. Each transmit frame must be in a contiguous buffer, aligned
58 * on a longword (32-bit) boundary. This means we almost always have to
59 * do mbuf copies in order to transmit a frame, except in the unlikely
60 * case where a) the packet fits into a single mbuf, and b) the packet
61 * is 32-bit aligned within the mbuf's data area. The presence of only
62 * four descriptor registers means that we can never have more than four
63 * packets queued for transmission at any one time.
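 * (In this driver each slot is one RTK_TXADDRn/RTK_TXSTATn register pair;
 * the slots are rotated through the rtk_tx_free and rtk_tx_dirty queues
 * in rtk_start() and rtk_txeof() below.)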
65 * Reception is not much better. The driver has to allocate a single large
66 * buffer area (up to 64K in size) into which the chip will DMA received
67 * frames. Because we don't know where within this region received packets
68 * will begin or end, we have no choice but to copy data from the buffer
69 * area into mbufs in order to pass the packets up to the higher protocol
70 * levels.
72 * It's impossible given this rotten design to really achieve decent
73 * performance at 100Mbps, unless you happen to have a 400MHz PII or
74 * some equally overmuscled CPU to drive it.
76 * On the bright side, the 8139 does have a built-in PHY, although
77 * rather than using an MDIO serial interface like most other NICs, the
78 * PHY registers are directly accessible through the 8139's register
79 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
80 * filter.
82 * The 8129 chip is an older version of the 8139 that uses an external PHY
83 * chip. The 8129 has a serial MDIO interface for accessing the MII where
84 * the 8139 lets you directly access the on-board PHY registers. We need
85 * to select which interface to use depending on the chip type.
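 * (The 8129 case is handled by the bit-banged MDIO helpers rtk_mii_sync(),
 * rtk_mii_send(), rtk_mii_readreg() and rtk_mii_writereg() below; the 8139
 * case maps MII_BMCR/BMSR/ANAR/ANER/ANLPAR onto the chip's own RTK_BMCR,
 * RTK_BMSR, RTK_ANAR, RTK_ANER and RTK_LPAR registers in rtk_phy_readreg()
 * and rtk_phy_writereg(), keyed off the RTKQ_8129 quirk flag.)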
88 #include <sys/cdefs.h>
89 __KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.86 2009/04/27 14:52:50 tsutsui Exp $");
91 #include "bpfilter.h"
92 #include "rnd.h"
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/callout.h>
97 #include <sys/device.h>
98 #include <sys/sockio.h>
99 #include <sys/mbuf.h>
100 #include <sys/malloc.h>
101 #include <sys/kernel.h>
102 #include <sys/socket.h>
104 #include <uvm/uvm_extern.h>
106 #include <net/if.h>
107 #include <net/if_arp.h>
108 #include <net/if_ether.h>
109 #include <net/if_dl.h>
110 #include <net/if_media.h>
112 #if NBPFILTER > 0
113 #include <net/bpf.h>
114 #endif
115 #if NRND > 0
116 #include <sys/rnd.h>
117 #endif
119 #include <sys/bus.h>
120 #include <machine/endian.h>
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
125 #include <dev/ic/rtl81x9reg.h>
126 #include <dev/ic/rtl81x9var.h>
128 static void rtk_reset(struct rtk_softc *);
129 static void rtk_rxeof(struct rtk_softc *);
130 static void rtk_txeof(struct rtk_softc *);
131 static void rtk_start(struct ifnet *);
132 static int rtk_ioctl(struct ifnet *, u_long, void *);
133 static int rtk_init(struct ifnet *);
134 static void rtk_stop(struct ifnet *, int);
136 static void rtk_watchdog(struct ifnet *);
138 static void rtk_eeprom_putbyte(struct rtk_softc *, int, int);
139 static void rtk_mii_sync(struct rtk_softc *);
140 static void rtk_mii_send(struct rtk_softc *, uint32_t, int);
141 static int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *);
142 static int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *);
144 static int rtk_phy_readreg(device_t, int, int);
145 static void rtk_phy_writereg(device_t, int, int, int);
146 static void rtk_phy_statchg(device_t);
147 static void rtk_tick(void *);
149 static int rtk_enable(struct rtk_softc *);
150 static void rtk_disable(struct rtk_softc *);
152 static void rtk_list_tx_init(struct rtk_softc *);
154 #define EE_SET(x) \
155 CSR_WRITE_1(sc, RTK_EECMD, \
156 CSR_READ_1(sc, RTK_EECMD) | (x))
158 #define EE_CLR(x) \
159 CSR_WRITE_1(sc, RTK_EECMD, \
160 CSR_READ_1(sc, RTK_EECMD) & ~(x))
162 #define EE_DELAY() DELAY(100)
164 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
167 * Send a read command and address to the EEPROM, check for ACK.
169 static void
170 rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len)
172 int d, i;
174 d = (RTK_EECMD_READ << addr_len) | addr;
 177 	 * Feed in each bit and strobe the clock.
179 for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) {
180 if (d & (1 << (i - 1))) {
181 EE_SET(RTK_EE_DATAIN);
182 } else {
183 EE_CLR(RTK_EE_DATAIN);
185 EE_DELAY();
186 EE_SET(RTK_EE_CLK);
187 EE_DELAY();
188 EE_CLR(RTK_EE_CLK);
189 EE_DELAY();
194 * Read a word of data stored in the EEPROM at address 'addr.'
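 * (A minimal sketch of the access sequence, as implemented below: assert
 * RTK_EE_SEL, clock out the read opcode and address with
 * rtk_eeprom_putbyte(), then clock in 16 data bits MSB first, and finally
 * drop back to RTK_EEMODE_OFF.)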
196 uint16_t
197 rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len)
199 uint16_t word;
200 int i;
202 /* Enter EEPROM access mode. */
203 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM);
204 EE_DELAY();
205 EE_SET(RTK_EE_SEL);
208 * Send address of word we want to read.
210 rtk_eeprom_putbyte(sc, addr, addr_len);
213 * Start reading bits from EEPROM.
215 word = 0;
216 for (i = 16; i > 0; i--) {
217 EE_SET(RTK_EE_CLK);
218 EE_DELAY();
219 if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT)
220 word |= 1 << (i - 1);
221 EE_CLR(RTK_EE_CLK);
222 EE_DELAY();
225 /* Turn off EEPROM access mode. */
226 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
228 return word;
232 * MII access routines are provided for the 8129, which
233 * doesn't have a built-in PHY. For the 8139, we fake things
234 * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the
235 * direct access PHY registers.
237 #define MII_SET(x) \
238 CSR_WRITE_1(sc, RTK_MII, \
239 CSR_READ_1(sc, RTK_MII) | (x))
241 #define MII_CLR(x) \
242 CSR_WRITE_1(sc, RTK_MII, \
243 CSR_READ_1(sc, RTK_MII) & ~(x))
 246  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
248 static void
249 rtk_mii_sync(struct rtk_softc *sc)
251 int i;
253 MII_SET(RTK_MII_DIR|RTK_MII_DATAOUT);
255 for (i = 0; i < 32; i++) {
256 MII_SET(RTK_MII_CLK);
257 DELAY(1);
258 MII_CLR(RTK_MII_CLK);
259 DELAY(1);
264 * Clock a series of bits through the MII.
266 static void
267 rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt)
269 int i;
271 MII_CLR(RTK_MII_CLK);
273 for (i = cnt; i > 0; i--) {
274 if (bits & (1 << (i - 1))) {
275 MII_SET(RTK_MII_DATAOUT);
276 } else {
277 MII_CLR(RTK_MII_DATAOUT);
279 DELAY(1);
280 MII_CLR(RTK_MII_CLK);
281 DELAY(1);
282 MII_SET(RTK_MII_CLK);
 287  * Read a PHY register through the MII.
289 static int
290 rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
292 int i, ack, s;
294 s = splnet();
297 * Set up frame for RX.
299 frame->mii_stdelim = RTK_MII_STARTDELIM;
300 frame->mii_opcode = RTK_MII_READOP;
301 frame->mii_turnaround = 0;
302 frame->mii_data = 0;
304 CSR_WRITE_2(sc, RTK_MII, 0);
307 * Turn on data xmit.
309 MII_SET(RTK_MII_DIR);
311 rtk_mii_sync(sc);
314 * Send command/address info.
316 rtk_mii_send(sc, frame->mii_stdelim, 2);
317 rtk_mii_send(sc, frame->mii_opcode, 2);
318 rtk_mii_send(sc, frame->mii_phyaddr, 5);
319 rtk_mii_send(sc, frame->mii_regaddr, 5);
321 /* Idle bit */
322 MII_CLR((RTK_MII_CLK|RTK_MII_DATAOUT));
323 DELAY(1);
324 MII_SET(RTK_MII_CLK);
325 DELAY(1);
327 /* Turn off xmit. */
328 MII_CLR(RTK_MII_DIR);
330 /* Check for ack */
331 MII_CLR(RTK_MII_CLK);
332 DELAY(1);
333 ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN;
334 MII_SET(RTK_MII_CLK);
335 DELAY(1);
338 * Now try reading data bits. If the ack failed, we still
339 * need to clock through 16 cycles to keep the PHY(s) in sync.
341 if (ack) {
342 for (i = 0; i < 16; i++) {
343 MII_CLR(RTK_MII_CLK);
344 DELAY(1);
345 MII_SET(RTK_MII_CLK);
346 DELAY(1);
348 goto fail;
351 for (i = 16; i > 0; i--) {
352 MII_CLR(RTK_MII_CLK);
353 DELAY(1);
354 if (!ack) {
355 if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN)
356 frame->mii_data |= 1 << (i - 1);
357 DELAY(1);
359 MII_SET(RTK_MII_CLK);
360 DELAY(1);
363 fail:
364 MII_CLR(RTK_MII_CLK);
365 DELAY(1);
366 MII_SET(RTK_MII_CLK);
367 DELAY(1);
369 splx(s);
371 if (ack)
372 return 1;
373 return 0;
377 * Write to a PHY register through the MII.
379 static int
380 rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
382 int s;
384 s = splnet();
386 * Set up frame for TX.
388 frame->mii_stdelim = RTK_MII_STARTDELIM;
389 frame->mii_opcode = RTK_MII_WRITEOP;
390 frame->mii_turnaround = RTK_MII_TURNAROUND;
393 * Turn on data output.
395 MII_SET(RTK_MII_DIR);
397 rtk_mii_sync(sc);
399 rtk_mii_send(sc, frame->mii_stdelim, 2);
400 rtk_mii_send(sc, frame->mii_opcode, 2);
401 rtk_mii_send(sc, frame->mii_phyaddr, 5);
402 rtk_mii_send(sc, frame->mii_regaddr, 5);
403 rtk_mii_send(sc, frame->mii_turnaround, 2);
404 rtk_mii_send(sc, frame->mii_data, 16);
406 /* Idle bit. */
407 MII_SET(RTK_MII_CLK);
408 DELAY(1);
409 MII_CLR(RTK_MII_CLK);
410 DELAY(1);
413 * Turn off xmit.
415 MII_CLR(RTK_MII_DIR);
417 splx(s);
419 return 0;
422 static int
423 rtk_phy_readreg(device_t self, int phy, int reg)
425 struct rtk_softc *sc = device_private(self);
426 struct rtk_mii_frame frame;
427 int rval;
428 int rtk8139_reg;
430 if ((sc->sc_quirk & RTKQ_8129) == 0) {
431 if (phy != 7)
432 return 0;
434 switch (reg) {
435 case MII_BMCR:
436 rtk8139_reg = RTK_BMCR;
437 break;
438 case MII_BMSR:
439 rtk8139_reg = RTK_BMSR;
440 break;
441 case MII_ANAR:
442 rtk8139_reg = RTK_ANAR;
443 break;
444 case MII_ANER:
445 rtk8139_reg = RTK_ANER;
446 break;
447 case MII_ANLPAR:
448 rtk8139_reg = RTK_LPAR;
449 break;
450 default:
451 #if 0
452 printf("%s: bad phy register\n", device_xname(self));
453 #endif
454 return 0;
456 rval = CSR_READ_2(sc, rtk8139_reg);
457 return rval;
460 memset(&frame, 0, sizeof(frame));
462 frame.mii_phyaddr = phy;
463 frame.mii_regaddr = reg;
464 rtk_mii_readreg(sc, &frame);
466 return frame.mii_data;
469 static void
470 rtk_phy_writereg(device_t self, int phy, int reg, int data)
472 struct rtk_softc *sc = device_private(self);
473 struct rtk_mii_frame frame;
474 int rtk8139_reg;
476 if ((sc->sc_quirk & RTKQ_8129) == 0) {
477 if (phy != 7)
478 return;
480 switch (reg) {
481 case MII_BMCR:
482 rtk8139_reg = RTK_BMCR;
483 break;
484 case MII_BMSR:
485 rtk8139_reg = RTK_BMSR;
486 break;
487 case MII_ANAR:
488 rtk8139_reg = RTK_ANAR;
489 break;
490 case MII_ANER:
491 rtk8139_reg = RTK_ANER;
492 break;
493 case MII_ANLPAR:
494 rtk8139_reg = RTK_LPAR;
495 break;
496 default:
497 #if 0
498 printf("%s: bad phy register\n", device_xname(self));
499 #endif
500 return;
502 CSR_WRITE_2(sc, rtk8139_reg, data);
503 return;
506 memset(&frame, 0, sizeof(frame));
508 frame.mii_phyaddr = phy;
509 frame.mii_regaddr = reg;
510 frame.mii_data = data;
512 rtk_mii_writereg(sc, &frame);
515 static void
516 rtk_phy_statchg(device_t v)
519 /* Nothing to do. */
522 #define rtk_calchash(addr) \
523 (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
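/*
 * The hash value is the top 6 bits of the big-endian CRC32 of the
 * station address, selecting one of the 64 filter bits split across
 * RTK_MAR0 (bits 0-31) and RTK_MAR4 (bits 32-63).
 */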
526 * Program the 64-bit multicast hash filter.
528 void
529 rtk_setmulti(struct rtk_softc *sc)
531 struct ifnet *ifp;
532 uint32_t hashes[2] = { 0, 0 };
533 uint32_t rxfilt;
534 struct ether_multi *enm;
535 struct ether_multistep step;
536 int h, mcnt;
538 ifp = &sc->ethercom.ec_if;
540 rxfilt = CSR_READ_4(sc, RTK_RXCFG);
542 if (ifp->if_flags & IFF_PROMISC) {
543 allmulti:
544 ifp->if_flags |= IFF_ALLMULTI;
545 rxfilt |= RTK_RXCFG_RX_MULTI;
546 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
547 CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF);
548 CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF);
549 return;
552 /* first, zot all the existing hash bits */
553 CSR_WRITE_4(sc, RTK_MAR0, 0);
554 CSR_WRITE_4(sc, RTK_MAR4, 0);
556 /* now program new ones */
557 ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
558 mcnt = 0;
559 while (enm != NULL) {
560 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
561 ETHER_ADDR_LEN) != 0)
562 goto allmulti;
564 h = rtk_calchash(enm->enm_addrlo);
565 if (h < 32)
566 hashes[0] |= (1 << h);
567 else
568 hashes[1] |= (1 << (h - 32));
569 mcnt++;
570 ETHER_NEXT_MULTI(step, enm);
573 ifp->if_flags &= ~IFF_ALLMULTI;
575 if (mcnt)
576 rxfilt |= RTK_RXCFG_RX_MULTI;
577 else
578 rxfilt &= ~RTK_RXCFG_RX_MULTI;
580 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
583 * For some unfathomable reason, RealTek decided to reverse
584 * the order of the multicast hash registers in the PCI Express
585 * parts. This means we have to write the hash pattern in reverse
586 * order for those devices.
588 if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
589 CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1]));
590 CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0]));
591 } else {
592 CSR_WRITE_4(sc, RTK_MAR0, hashes[0]);
593 CSR_WRITE_4(sc, RTK_MAR4, hashes[1]);
597 void
598 rtk_reset(struct rtk_softc *sc)
600 int i;
602 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
604 for (i = 0; i < RTK_TIMEOUT; i++) {
605 DELAY(10);
606 if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
607 break;
609 if (i == RTK_TIMEOUT)
610 printf("%s: reset never completed!\n",
611 device_xname(sc->sc_dev));
615 * Attach the interface. Allocate softc structures, do ifmedia
616 * setup and ethernet/BPF attach.
618 void
619 rtk_attach(struct rtk_softc *sc)
621 device_t self = sc->sc_dev;
622 struct ifnet *ifp;
623 struct rtk_tx_desc *txd;
624 uint16_t val;
625 uint8_t eaddr[ETHER_ADDR_LEN];
626 int error;
627 int i, addr_len;
629 callout_init(&sc->rtk_tick_ch, 0);
632 * Check EEPROM type 9346 or 9356.
634 if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
635 addr_len = RTK_EEADDR_LEN1;
636 else
637 addr_len = RTK_EEADDR_LEN0;
640 * Get station address.
642 val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
643 eaddr[0] = val & 0xff;
644 eaddr[1] = val >> 8;
645 val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
646 eaddr[2] = val & 0xff;
647 eaddr[3] = val >> 8;
648 val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
649 eaddr[4] = val & 0xff;
650 eaddr[5] = val >> 8;
652 if ((error = bus_dmamem_alloc(sc->sc_dmat,
653 RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
654 BUS_DMA_NOWAIT)) != 0) {
655 aprint_error_dev(self,
656 "can't allocate recv buffer, error = %d\n", error);
657 goto fail_0;
660 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg,
661 RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
662 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
663 aprint_error_dev(self,
664 "can't map recv buffer, error = %d\n", error);
665 goto fail_1;
668 if ((error = bus_dmamap_create(sc->sc_dmat,
669 RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
670 &sc->recv_dmamap)) != 0) {
671 aprint_error_dev(self,
672 "can't create recv buffer DMA map, error = %d\n", error);
673 goto fail_2;
676 if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
677 sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
678 NULL, BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
679 aprint_error_dev(self,
680 "can't load recv buffer DMA map, error = %d\n", error);
681 goto fail_3;
684 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
685 txd = &sc->rtk_tx_descs[i];
686 if ((error = bus_dmamap_create(sc->sc_dmat,
687 MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
688 &txd->txd_dmamap)) != 0) {
689 aprint_error_dev(self,
690 "can't create snd buffer DMA map, error = %d\n",
691 error);
692 goto fail_4;
694 txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
695 txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
697 SIMPLEQ_INIT(&sc->rtk_tx_free);
698 SIMPLEQ_INIT(&sc->rtk_tx_dirty);
701 * From this point forward, the attachment cannot fail. A failure
 702 	 * before this releases all resources that may have been
703 * allocated.
705 sc->sc_flags |= RTK_ATTACHED;
707 /* Reset the adapter. */
708 rtk_reset(sc);
710 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
712 ifp = &sc->ethercom.ec_if;
713 ifp->if_softc = sc;
714 strcpy(ifp->if_xname, device_xname(self));
715 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
716 ifp->if_ioctl = rtk_ioctl;
717 ifp->if_start = rtk_start;
718 ifp->if_watchdog = rtk_watchdog;
719 ifp->if_init = rtk_init;
720 ifp->if_stop = rtk_stop;
721 IFQ_SET_READY(&ifp->if_snd);
724 * Do ifmedia setup.
726 sc->mii.mii_ifp = ifp;
727 sc->mii.mii_readreg = rtk_phy_readreg;
728 sc->mii.mii_writereg = rtk_phy_writereg;
729 sc->mii.mii_statchg = rtk_phy_statchg;
730 sc->ethercom.ec_mii = &sc->mii;
731 ifmedia_init(&sc->mii.mii_media, IFM_IMASK, ether_mediachange,
732 ether_mediastatus);
733 mii_attach(self, &sc->mii, 0xffffffff,
734 MII_PHY_ANY, MII_OFFSET_ANY, 0);
736 /* Choose a default media. */
737 if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
738 ifmedia_add(&sc->mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
739 ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_NONE);
740 } else {
741 ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_AUTO);
745 * Call MI attach routines.
747 if_attach(ifp);
748 ether_ifattach(ifp, eaddr);
750 #if NRND > 0
751 rnd_attach_source(&sc->rnd_source, device_xname(self),
752 RND_TYPE_NET, 0);
753 #endif
755 return;
756 fail_4:
757 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
758 txd = &sc->rtk_tx_descs[i];
759 if (txd->txd_dmamap != NULL)
760 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
762 fail_3:
763 bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
764 fail_2:
765 bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
766 RTK_RXBUFLEN + 16);
767 fail_1:
768 bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
769 fail_0:
770 return;
774 * Initialize the transmit descriptors.
776 static void
777 rtk_list_tx_init(struct rtk_softc *sc)
779 struct rtk_tx_desc *txd;
780 int i;
782 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL)
783 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
784 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL)
785 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
787 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
788 txd = &sc->rtk_tx_descs[i];
789 CSR_WRITE_4(sc, txd->txd_txaddr, 0);
790 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
795 * rtk_activate:
796 * Handle device activation/deactivation requests.
799 rtk_activate(device_t self, enum devact act)
801 struct rtk_softc *sc = device_private(self);
803 switch (act) {
804 case DVACT_DEACTIVATE:
805 if_deactivate(&sc->ethercom.ec_if);
806 return 0;
807 default:
808 return EOPNOTSUPP;
813 * rtk_detach:
814 * Detach a rtk interface.
817 rtk_detach(struct rtk_softc *sc)
819 struct ifnet *ifp = &sc->ethercom.ec_if;
820 struct rtk_tx_desc *txd;
821 int i;
824 * Succeed now if there isn't any work to do.
826 if ((sc->sc_flags & RTK_ATTACHED) == 0)
827 return 0;
829 /* Unhook our tick handler. */
830 callout_stop(&sc->rtk_tick_ch);
832 /* Detach all PHYs. */
833 mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);
835 /* Delete all remaining media. */
836 ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);
838 #if NRND > 0
839 rnd_detach_source(&sc->rnd_source);
840 #endif
842 ether_ifdetach(ifp);
843 if_detach(ifp);
845 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
846 txd = &sc->rtk_tx_descs[i];
847 if (txd->txd_dmamap != NULL)
848 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
850 bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
851 bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
852 RTK_RXBUFLEN + 16);
853 bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
855 return 0;
859 * rtk_enable:
860 * Enable the RTL81X9 chip.
863 rtk_enable(struct rtk_softc *sc)
866 if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
867 if ((*sc->sc_enable)(sc) != 0) {
868 printf("%s: device enable failed\n",
869 device_xname(sc->sc_dev));
870 return EIO;
872 sc->sc_flags |= RTK_ENABLED;
874 return 0;
878 * rtk_disable:
879 * Disable the RTL81X9 chip.
881 void
882 rtk_disable(struct rtk_softc *sc)
885 if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
886 (*sc->sc_disable)(sc);
887 sc->sc_flags &= ~RTK_ENABLED;
892 * A frame has been uploaded: pass the resulting mbuf chain up to
893 * the higher level protocols.
895 * You know there's something wrong with a PCI bus-master chip design.
897 * The receive operation is badly documented in the datasheet, so I'll
898 * attempt to document it here. The driver provides a buffer area and
899 * places its base address in the RX buffer start address register.
900 * The chip then begins copying frames into the RX buffer. Each frame
901 * is preceded by a 32-bit RX status word which specifies the length
902 * of the frame and certain other status bits. Each frame (starting with
903 * the status word) is also 32-bit aligned. The frame length is in the
904 * first 16 bits of the status word; the lower 15 bits correspond with
905 * the 'rx status register' mentioned in the datasheet.
907 * Note: to make the Alpha happy, the frame payload needs to be aligned
 908  * on a 32-bit boundary. To achieve this, we copy the data into the mbuf
909 * shifted forward 2 bytes.
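 *
 * Roughly, the receive ring the chip fills looks like this (everything
 * 32-bit aligned, wrapping at RTK_RXBUFLEN):
 *
 *   | status/len (4) | frame + CRC (len) | pad to 4 | status/len (4) | ...
 *
 * rtk_rxeof() below follows it using RTK_CURRXADDR and RTK_CURRXBUF.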
911 static void
912 rtk_rxeof(struct rtk_softc *sc)
914 struct mbuf *m;
915 struct ifnet *ifp;
916 uint8_t *rxbufpos, *dst;
917 u_int total_len, wrap;
918 uint32_t rxstat;
919 uint16_t cur_rx, new_rx;
920 uint16_t limit;
921 uint16_t rx_bytes, max_bytes;
923 ifp = &sc->ethercom.ec_if;
925 cur_rx = (CSR_READ_2(sc, RTK_CURRXADDR) + 16) % RTK_RXBUFLEN;
927 /* Do not try to read past this point. */
928 limit = CSR_READ_2(sc, RTK_CURRXBUF) % RTK_RXBUFLEN;
930 if (limit < cur_rx)
931 max_bytes = (RTK_RXBUFLEN - cur_rx) + limit;
932 else
933 max_bytes = limit - cur_rx;
934 rx_bytes = 0;
936 while ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_EMPTY_RXBUF) == 0) {
937 rxbufpos = sc->rtk_rx_buf + cur_rx;
938 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
939 RTK_RXSTAT_LEN, BUS_DMASYNC_POSTREAD);
940 rxstat = le32toh(*(uint32_t *)rxbufpos);
941 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
942 RTK_RXSTAT_LEN, BUS_DMASYNC_PREREAD);
945 * Here's a totally undocumented fact for you. When the
946 * RealTek chip is in the process of copying a packet into
947 * RAM for you, the length will be 0xfff0. If you spot a
948 * packet header with this value, you need to stop. The
949 * datasheet makes absolutely no mention of this and
950 * RealTek should be shot for this.
952 total_len = rxstat >> 16;
953 if (total_len == RTK_RXSTAT_UNFINISHED)
954 break;
956 if ((rxstat & RTK_RXSTAT_RXOK) == 0 ||
957 total_len < ETHER_MIN_LEN ||
958 total_len > (MCLBYTES - RTK_ETHER_ALIGN)) {
959 ifp->if_ierrors++;
962 * submitted by:[netbsd-pcmcia:00484]
963 * Takahiro Kambe <taca@sky.yamashina.kyoto.jp>
964 * obtain from:
965 * FreeBSD if_rl.c rev 1.24->1.25
968 #if 0
969 if (rxstat & (RTK_RXSTAT_BADSYM|RTK_RXSTAT_RUNT|
970 RTK_RXSTAT_GIANT|RTK_RXSTAT_CRCERR|
971 RTK_RXSTAT_ALIGNERR)) {
972 CSR_WRITE_2(sc, RTK_COMMAND, RTK_CMD_TX_ENB);
973 CSR_WRITE_2(sc, RTK_COMMAND,
974 RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
975 CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
976 CSR_WRITE_4(sc, RTK_RXADDR,
977 sc->recv_dmamap->dm_segs[0].ds_addr);
978 cur_rx = 0;
980 break;
981 #else
982 rtk_init(ifp);
983 return;
984 #endif
987 /* No errors; receive the packet. */
988 rx_bytes += total_len + RTK_RXSTAT_LEN;
991 * Avoid trying to read more bytes than we know
992 * the chip has prepared for us.
994 if (rx_bytes > max_bytes)
995 break;
998 * Skip the status word, wrapping around to the beginning
999 * of the Rx area, if necessary.
1001 cur_rx = (cur_rx + RTK_RXSTAT_LEN) % RTK_RXBUFLEN;
1002 rxbufpos = sc->rtk_rx_buf + cur_rx;
1005 * Compute the number of bytes at which the packet
1006 * will wrap to the beginning of the ring buffer.
1008 wrap = RTK_RXBUFLEN - cur_rx;
1011 * Compute where the next pending packet is.
1013 if (total_len > wrap)
1014 new_rx = total_len - wrap;
1015 else
1016 new_rx = cur_rx + total_len;
1017 /* Round up to 32-bit boundary. */
1018 new_rx = roundup2(new_rx, sizeof(uint32_t)) % RTK_RXBUFLEN;
1021 * The RealTek chip includes the CRC with every
1022 * incoming packet; trim it off here.
1024 total_len -= ETHER_CRC_LEN;
1027 * Now allocate an mbuf (and possibly a cluster) to hold
1028 * the packet. Note we offset the packet 2 bytes so that
1029 * data after the Ethernet header will be 4-byte aligned.
1031 MGETHDR(m, M_DONTWAIT, MT_DATA);
1032 if (m == NULL) {
1033 printf("%s: unable to allocate Rx mbuf\n",
1034 device_xname(sc->sc_dev));
1035 ifp->if_ierrors++;
1036 goto next_packet;
1038 if (total_len > (MHLEN - RTK_ETHER_ALIGN)) {
1039 MCLGET(m, M_DONTWAIT);
1040 if ((m->m_flags & M_EXT) == 0) {
1041 printf("%s: unable to allocate Rx cluster\n",
1042 device_xname(sc->sc_dev));
1043 ifp->if_ierrors++;
1044 m_freem(m);
1045 m = NULL;
1046 goto next_packet;
1049 m->m_data += RTK_ETHER_ALIGN; /* for alignment */
1050 m->m_pkthdr.rcvif = ifp;
1051 m->m_pkthdr.len = m->m_len = total_len;
1052 dst = mtod(m, void *);
1055 * If the packet wraps, copy up to the wrapping point.
1057 if (total_len > wrap) {
1058 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1059 cur_rx, wrap, BUS_DMASYNC_POSTREAD);
1060 memcpy(dst, rxbufpos, wrap);
1061 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1062 cur_rx, wrap, BUS_DMASYNC_PREREAD);
1063 cur_rx = 0;
1064 rxbufpos = sc->rtk_rx_buf;
1065 total_len -= wrap;
1066 dst += wrap;
1070 * ...and now the rest.
1072 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1073 cur_rx, total_len, BUS_DMASYNC_POSTREAD);
1074 memcpy(dst, rxbufpos, total_len);
1075 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1076 cur_rx, total_len, BUS_DMASYNC_PREREAD);
1078 next_packet:
1079 CSR_WRITE_2(sc, RTK_CURRXADDR, (new_rx - 16) % RTK_RXBUFLEN);
1080 cur_rx = new_rx;
1082 if (m == NULL)
1083 continue;
1085 ifp->if_ipackets++;
1087 #if NBPFILTER > 0
1088 if (ifp->if_bpf)
1089 bpf_mtap(ifp->if_bpf, m);
1090 #endif
1091 /* pass it on. */
1092 (*ifp->if_input)(ifp, m);
1097 * A frame was downloaded to the chip. It's safe for us to clean up
1098 * the list buffers.
1100 static void
1101 rtk_txeof(struct rtk_softc *sc)
1103 struct ifnet *ifp;
1104 struct rtk_tx_desc *txd;
1105 uint32_t txstat;
1107 ifp = &sc->ethercom.ec_if;
1110 * Go through our tx list and free mbufs for those
1111 * frames that have been uploaded.
1113 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1114 txstat = CSR_READ_4(sc, txd->txd_txstat);
1115 if ((txstat & (RTK_TXSTAT_TX_OK|
1116 RTK_TXSTAT_TX_UNDERRUN|RTK_TXSTAT_TXABRT)) == 0)
1117 break;
1119 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1121 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
1122 txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1123 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1124 m_freem(txd->txd_mbuf);
1125 txd->txd_mbuf = NULL;
1127 ifp->if_collisions += (txstat & RTK_TXSTAT_COLLCNT) >> 24;
1129 if (txstat & RTK_TXSTAT_TX_OK)
1130 ifp->if_opackets++;
1131 else {
1132 ifp->if_oerrors++;
1135 		 * Increase the early TX threshold if an underrun occurred,
1136 		 * in 64-byte steps (sc_txthresh counts 32-byte units).
1138 if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
1139 #ifdef DEBUG
1140 printf("%s: transmit underrun;",
1141 device_xname(sc->sc_dev));
1142 #endif
1143 if (sc->sc_txthresh < RTK_TXTH_MAX) {
1144 sc->sc_txthresh += 2;
1145 #ifdef DEBUG
1146 printf(" new threshold: %d bytes",
1147 sc->sc_txthresh * 32);
1148 #endif
1150 #ifdef DEBUG
1151 printf("\n");
1152 #endif
1154 if (txstat & (RTK_TXSTAT_TXABRT|RTK_TXSTAT_OUTOFWIN))
1155 CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1157 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
1158 ifp->if_flags &= ~IFF_OACTIVE;
1161 /* Clear the timeout timer if there is no pending packet. */
1162 if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
1163 ifp->if_timer = 0;
1168 rtk_intr(void *arg)
1170 struct rtk_softc *sc;
1171 struct ifnet *ifp;
1172 uint16_t status;
1173 int handled;
1175 sc = arg;
1176 ifp = &sc->ethercom.ec_if;
1178 if (!device_has_power(sc->sc_dev))
1179 return 0;
1181 /* Disable interrupts. */
1182 CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1184 handled = 0;
1185 for (;;) {
1187 status = CSR_READ_2(sc, RTK_ISR);
1189 if (status == 0xffff)
1190 break; /* Card is gone... */
1192 if (status)
1193 CSR_WRITE_2(sc, RTK_ISR, status);
1195 if ((status & RTK_INTRS) == 0)
1196 break;
1198 handled = 1;
1200 if (status & RTK_ISR_RX_OK)
1201 rtk_rxeof(sc);
1203 if (status & RTK_ISR_RX_ERR)
1204 rtk_rxeof(sc);
1206 if (status & (RTK_ISR_TX_OK|RTK_ISR_TX_ERR))
1207 rtk_txeof(sc);
1209 if (status & RTK_ISR_SYSTEM_ERR) {
1210 rtk_reset(sc);
1211 rtk_init(ifp);
1215 /* Re-enable interrupts. */
1216 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1218 if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
1219 rtk_start(ifp);
1221 #if NRND > 0
1222 if (RND_ENABLED(&sc->rnd_source))
1223 rnd_add_uint32(&sc->rnd_source, status);
1224 #endif
1226 return handled;
1230 * Main transmit routine.
1233 static void
1234 rtk_start(struct ifnet *ifp)
1236 struct rtk_softc *sc;
1237 struct rtk_tx_desc *txd;
1238 struct mbuf *m_head, *m_new;
1239 int error, len;
1241 sc = ifp->if_softc;
1243 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
1244 IFQ_POLL(&ifp->if_snd, m_head);
1245 if (m_head == NULL)
1246 break;
1247 m_new = NULL;
1250 * Load the DMA map. If this fails, the packet didn't
1251 * fit in one DMA segment, and we need to copy. Note,
1252 * the packet must also be aligned.
1253 		 * If the packet is too small, copy it too, to be sure we
1254 		 * have enough room for the pad buffer.
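		 * (ETHER_PAD_LEN here is ETHER_MIN_LEN - ETHER_CRC_LEN, i.e.
		 * 60 bytes; short frames are zero-padded up to that length in
		 * the copy path below.)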
1256 if ((mtod(m_head, uintptr_t) & 3) != 0 ||
1257 m_head->m_pkthdr.len < ETHER_PAD_LEN ||
1258 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap,
1259 m_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1260 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1261 if (m_new == NULL) {
1262 printf("%s: unable to allocate Tx mbuf\n",
1263 device_xname(sc->sc_dev));
1264 break;
1266 if (m_head->m_pkthdr.len > MHLEN) {
1267 MCLGET(m_new, M_DONTWAIT);
1268 if ((m_new->m_flags & M_EXT) == 0) {
1269 printf("%s: unable to allocate Tx "
1270 "cluster\n",
1271 device_xname(sc->sc_dev));
1272 m_freem(m_new);
1273 break;
1276 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1277 mtod(m_new, void *));
1278 m_new->m_pkthdr.len = m_new->m_len =
1279 m_head->m_pkthdr.len;
1280 if (m_head->m_pkthdr.len < ETHER_PAD_LEN) {
1281 memset(
1282 mtod(m_new, char *) + m_head->m_pkthdr.len,
1283 0, ETHER_PAD_LEN - m_head->m_pkthdr.len);
1284 m_new->m_pkthdr.len = m_new->m_len =
1285 ETHER_PAD_LEN;
1287 error = bus_dmamap_load_mbuf(sc->sc_dmat,
1288 txd->txd_dmamap, m_new,
1289 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1290 if (error) {
1291 printf("%s: unable to load Tx buffer, "
1292 "error = %d\n",
1293 device_xname(sc->sc_dev), error);
1294 break;
1297 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1298 #if NBPFILTER > 0
1300 * If there's a BPF listener, bounce a copy of this frame
1301 * to him.
1303 if (ifp->if_bpf)
1304 bpf_mtap(ifp->if_bpf, m_head);
1305 #endif
1306 if (m_new != NULL) {
1307 m_freem(m_head);
1308 m_head = m_new;
1310 txd->txd_mbuf = m_head;
1312 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
1313 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q);
1316 * Transmit the frame.
1318 bus_dmamap_sync(sc->sc_dmat,
1319 txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize,
1320 BUS_DMASYNC_PREWRITE);
1322 len = txd->txd_dmamap->dm_segs[0].ds_len;
1324 CSR_WRITE_4(sc, txd->txd_txaddr,
1325 txd->txd_dmamap->dm_segs[0].ds_addr);
1326 CSR_WRITE_4(sc, txd->txd_txstat,
1327 RTK_TXSTAT_THRESH(sc->sc_txthresh) | len);
1330 * Set a timeout in case the chip goes out to lunch.
1332 ifp->if_timer = 5;
1336 * We broke out of the loop because all our TX slots are
1337 * full. Mark the NIC as busy until it drains some of the
1338 * packets from the queue.
1340 if (SIMPLEQ_EMPTY(&sc->rtk_tx_free))
1341 ifp->if_flags |= IFF_OACTIVE;
1344 static int
1345 rtk_init(struct ifnet *ifp)
1347 struct rtk_softc *sc = ifp->if_softc;
1348 int error, i;
1349 uint32_t rxcfg;
1351 if ((error = rtk_enable(sc)) != 0)
1352 goto out;
1355 * Cancel pending I/O.
1357 rtk_stop(ifp, 0);
1359 /* Init our MAC address */
1360 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1361 CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]);
1364 /* Init the RX buffer pointer register. */
1365 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0,
1366 sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1367 CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr);
1369 /* Init TX descriptors. */
1370 rtk_list_tx_init(sc);
1372 /* Init Early TX threshold. */
1373 sc->sc_txthresh = RTK_TXTH_256;
1375 * Enable transmit and receive.
1377 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
1380 * Set the initial TX and RX configuration.
1382 CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1383 CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
1385 /* Set the individual bit to receive frames for this host only. */
1386 rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1387 rxcfg |= RTK_RXCFG_RX_INDIV;
1389 /* If we want promiscuous mode, set the allframes bit. */
1390 if (ifp->if_flags & IFF_PROMISC) {
1391 rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1392 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1393 } else {
1394 rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1395 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1399 * Set capture broadcast bit to capture broadcast frames.
1401 if (ifp->if_flags & IFF_BROADCAST) {
1402 rxcfg |= RTK_RXCFG_RX_BROAD;
1403 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1404 } else {
1405 rxcfg &= ~RTK_RXCFG_RX_BROAD;
1406 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1410 * Program the multicast filter, if necessary.
1412 rtk_setmulti(sc);
1415 * Enable interrupts.
1417 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1419 /* Start RX/TX process. */
1420 CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
1422 /* Enable receiver and transmitter. */
1423 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
1425 CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD|RTK_CFG1_FULLDUPLEX);
1428 * Set current media.
1430 if ((error = ether_mediachange(ifp)) != 0)
1431 goto out;
1433 ifp->if_flags |= IFF_RUNNING;
1434 ifp->if_flags &= ~IFF_OACTIVE;
1436 callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1438 out:
1439 if (error) {
1440 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1441 ifp->if_timer = 0;
1442 printf("%s: interface not running\n", device_xname(sc->sc_dev));
1444 return error;
1447 static int
1448 rtk_ioctl(struct ifnet *ifp, u_long command, void *data)
1450 struct rtk_softc *sc = ifp->if_softc;
1451 int s, error;
1453 s = splnet();
1454 error = ether_ioctl(ifp, command, data);
1455 if (error == ENETRESET) {
1456 if (ifp->if_flags & IFF_RUNNING) {
1458 * Multicast list has changed. Set the
1459 * hardware filter accordingly.
1461 rtk_setmulti(sc);
1463 error = 0;
1465 splx(s);
1467 return error;
1470 static void
1471 rtk_watchdog(struct ifnet *ifp)
1473 struct rtk_softc *sc;
1475 sc = ifp->if_softc;
1477 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1478 ifp->if_oerrors++;
1479 rtk_txeof(sc);
1480 rtk_rxeof(sc);
1481 rtk_init(ifp);
1485 * Stop the adapter and free any mbufs allocated to the
1486 * RX and TX lists.
1488 static void
1489 rtk_stop(struct ifnet *ifp, int disable)
1491 struct rtk_softc *sc = ifp->if_softc;
1492 struct rtk_tx_desc *txd;
1494 callout_stop(&sc->rtk_tick_ch);
1496 mii_down(&sc->mii);
1498 CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
1499 CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1502 * Free the TX list buffers.
1504 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1505 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1506 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1507 m_freem(txd->txd_mbuf);
1508 txd->txd_mbuf = NULL;
1509 CSR_WRITE_4(sc, txd->txd_txaddr, 0);
1512 if (disable)
1513 rtk_disable(sc);
1515 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1516 ifp->if_timer = 0;
1519 static void
1520 rtk_tick(void *arg)
1522 struct rtk_softc *sc = arg;
1523 int s;
1525 s = splnet();
1526 mii_tick(&sc->mii);
1527 splx(s);
1529 callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);