1 /* $NetBSD: rtl8169.c,v 1.127 2009/09/05 08:23:24 tsutsui Exp $ */
4 * Copyright (c) 1997, 1998-2003
5 * Bill Paul <wpaul@windriver.com>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: rtl8169.c,v 1.127 2009/09/05 08:23:24 tsutsui Exp $");
37 /* $FreeBSD: /repoman/r/ncvs/src/sys/dev/re/if_re.c,v 1.20 2004/04/11 20:34:08 ru Exp $ */
40 * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
42 * Written by Bill Paul <wpaul@windriver.com>
43 * Senior Networking Software Engineer
48 * This driver is designed to support RealTek's next generation of
49 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
50 * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S
53 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
54 * with the older 8139 family, however it also supports a special
55 * C+ mode of operation that provides several new performance enhancing
56 * features. These include:
58 * o Descriptor based DMA mechanism. Each descriptor represents
59 * a single packet fragment. Data buffers may be aligned on
64 * o TCP/IP checksum offload for both RX and TX
66 * o High and normal priority transmit DMA rings
68 * o VLAN tag insertion and extraction
70 * o TCP large send (segmentation offload)
72 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
73 * programming API is fairly straightforward. The RX filtering, EEPROM
74 * access and PHY access is the same as it is on the older 8139 series
77 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
78 * same programming API and feature set as the 8139C+ with the following
79 * differences and additions:
85 * o GMII and TBI ports/registers for interfacing with copper
88 * o RX and TX DMA rings can have up to 1024 descriptors
89 * (the 8139C+ allows a maximum of 64)
91 * o Slight differences in register layout from the 8139C+
93 * The TX start and timer interrupt registers are at different locations
94 * on the 8169 than they are on the 8139C+. Also, the status word in the
95 * RX descriptor has a slightly different bit layout. The 8169 does not
96 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
99 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
100 * (the 'S' stands for 'single-chip'). These devices have the same
101 * programming API as the older 8169, but also have some vendor-specific
102 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
103 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
105 * This driver takes advantage of the RX and TX checksum offload and
106 * VLAN tag insertion/extraction features. It also implements TX
107 * interrupt moderation using the timer interrupt registers, which
108 * significantly reduces TX interrupt load. There is also support
109 * for jumbo frames, however the 8169/8169S/8110S can not transmit
110 * jumbo frames larger than 7.5K, so the max MTU possible with this
111 * driver is 7500 bytes.
114 #include "bpfilter.h"
116 #include <sys/param.h>
117 #include <sys/endian.h>
118 #include <sys/systm.h>
119 #include <sys/sockio.h>
120 #include <sys/mbuf.h>
121 #include <sys/malloc.h>
122 #include <sys/kernel.h>
123 #include <sys/socket.h>
124 #include <sys/device.h>
127 #include <net/if_arp.h>
128 #include <net/if_dl.h>
129 #include <net/if_ether.h>
130 #include <net/if_media.h>
131 #include <net/if_vlanvar.h>
133 #include <netinet/in_systm.h> /* XXX for IP_MAXPACKET */
134 #include <netinet/in.h> /* XXX for IP_MAXPACKET */
135 #include <netinet/ip.h> /* XXX for IP_MAXPACKET */
143 #include <dev/mii/mii.h>
144 #include <dev/mii/miivar.h>
146 #include <dev/ic/rtl81x9reg.h>
147 #include <dev/ic/rtl81x9var.h>
149 #include <dev/ic/rtl8169var.h>
151 static inline void re_set_bufaddr(struct re_desc
*, bus_addr_t
);
153 static int re_newbuf(struct rtk_softc
*, int, struct mbuf
*);
154 static int re_rx_list_init(struct rtk_softc
*);
155 static int re_tx_list_init(struct rtk_softc
*);
156 static void re_rxeof(struct rtk_softc
*);
157 static void re_txeof(struct rtk_softc
*);
158 static void re_tick(void *);
159 static void re_start(struct ifnet
*);
160 static int re_ioctl(struct ifnet
*, u_long
, void *);
161 static int re_init(struct ifnet
*);
162 static void re_stop(struct ifnet
*, int);
163 static void re_watchdog(struct ifnet
*);
165 static int re_enable(struct rtk_softc
*);
166 static void re_disable(struct rtk_softc
*);
168 static int re_gmii_readreg(device_t
, int, int);
169 static void re_gmii_writereg(device_t
, int, int, int);
171 static int re_miibus_readreg(device_t
, int, int);
172 static void re_miibus_writereg(device_t
, int, int, int);
173 static void re_miibus_statchg(device_t
);
175 static void re_reset(struct rtk_softc
*);
178 re_set_bufaddr(struct re_desc
*d
, bus_addr_t addr
)
181 d
->re_bufaddr_lo
= htole32((uint32_t)addr
);
182 if (sizeof(bus_addr_t
) == sizeof(uint64_t))
183 d
->re_bufaddr_hi
= htole32((uint64_t)addr
>> 32);
185 d
->re_bufaddr_hi
= 0;
189 re_gmii_readreg(device_t dev
, int phy
, int reg
)
191 struct rtk_softc
*sc
= device_private(dev
);
198 /* Let the rgephy driver read the GMEDIASTAT register */
200 if (reg
== RTK_GMEDIASTAT
) {
201 rval
= CSR_READ_1(sc
, RTK_GMEDIASTAT
);
205 CSR_WRITE_4(sc
, RTK_PHYAR
, reg
<< 16);
208 for (i
= 0; i
< RTK_TIMEOUT
; i
++) {
209 rval
= CSR_READ_4(sc
, RTK_PHYAR
);
210 if (rval
& RTK_PHYAR_BUSY
)
215 if (i
== RTK_TIMEOUT
) {
216 printf("%s: PHY read failed\n", device_xname(sc
->sc_dev
));
220 return rval
& RTK_PHYAR_PHYDATA
;
224 re_gmii_writereg(device_t dev
, int phy
, int reg
, int data
)
226 struct rtk_softc
*sc
= device_private(dev
);
230 CSR_WRITE_4(sc
, RTK_PHYAR
, (reg
<< 16) |
231 (data
& RTK_PHYAR_PHYDATA
) | RTK_PHYAR_BUSY
);
234 for (i
= 0; i
< RTK_TIMEOUT
; i
++) {
235 rval
= CSR_READ_4(sc
, RTK_PHYAR
);
236 if (!(rval
& RTK_PHYAR_BUSY
))
241 if (i
== RTK_TIMEOUT
) {
242 printf("%s: PHY write reg %x <- %x failed\n",
243 device_xname(sc
->sc_dev
), reg
, data
);
248 re_miibus_readreg(device_t dev
, int phy
, int reg
)
250 struct rtk_softc
*sc
= device_private(dev
);
252 uint16_t re8139_reg
= 0;
257 if ((sc
->sc_quirk
& RTKQ_8139CPLUS
) == 0) {
258 rval
= re_gmii_readreg(dev
, phy
, reg
);
263 /* Pretend the internal PHY is only at address 0 */
270 re8139_reg
= RTK_BMCR
;
273 re8139_reg
= RTK_BMSR
;
276 re8139_reg
= RTK_ANAR
;
279 re8139_reg
= RTK_ANER
;
282 re8139_reg
= RTK_LPAR
;
289 * Allow the rlphy driver to read the media status
290 * register. If we have a link partner which does not
291 * support NWAY, this is the register which will tell
292 * us the results of parallel detection.
295 rval
= CSR_READ_1(sc
, RTK_MEDIASTAT
);
299 printf("%s: bad phy register\n", device_xname(sc
->sc_dev
));
303 rval
= CSR_READ_2(sc
, re8139_reg
);
304 if ((sc
->sc_quirk
& RTKQ_8139CPLUS
) != 0 && re8139_reg
== RTK_BMCR
) {
305 /* 8139C+ has different bit layout. */
306 rval
&= ~(BMCR_LOOP
| BMCR_ISO
);
313 re_miibus_writereg(device_t dev
, int phy
, int reg
, int data
)
315 struct rtk_softc
*sc
= device_private(dev
);
316 uint16_t re8139_reg
= 0;
321 if ((sc
->sc_quirk
& RTKQ_8139CPLUS
) == 0) {
322 re_gmii_writereg(dev
, phy
, reg
, data
);
327 /* Pretend the internal PHY is only at address 0 */
334 re8139_reg
= RTK_BMCR
;
335 if ((sc
->sc_quirk
& RTKQ_8139CPLUS
) != 0) {
336 /* 8139C+ has different bit layout. */
337 data
&= ~(BMCR_LOOP
| BMCR_ISO
);
341 re8139_reg
= RTK_BMSR
;
344 re8139_reg
= RTK_ANAR
;
347 re8139_reg
= RTK_ANER
;
350 re8139_reg
= RTK_LPAR
;
358 printf("%s: bad phy register\n", device_xname(sc
->sc_dev
));
362 CSR_WRITE_2(sc
, re8139_reg
, data
);
368 re_miibus_statchg(device_t dev
)
375 re_reset(struct rtk_softc
*sc
)
379 CSR_WRITE_1(sc
, RTK_COMMAND
, RTK_CMD_RESET
);
381 for (i
= 0; i
< RTK_TIMEOUT
; i
++) {
383 if ((CSR_READ_1(sc
, RTK_COMMAND
) & RTK_CMD_RESET
) == 0)
386 if (i
== RTK_TIMEOUT
)
387 printf("%s: reset never completed!\n",
388 device_xname(sc
->sc_dev
));
391 * NB: Realtek-supplied FreeBSD driver does this only for MACFG_3,
392 * but also says "Rtl8169s sigle chip detected".
394 if ((sc
->sc_quirk
& RTKQ_MACLDPS
) != 0)
395 CSR_WRITE_1(sc
, RTK_LDPS
, 1);
400 * The following routine is designed to test for a defect on some
401 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
402 * lines connected to the bus, however for a 32-bit only card, they
403 * should be pulled high. The result of this defect is that the
404 * NIC will not work right if you plug it into a 64-bit slot: DMA
405 * operations will be done with 64-bit transfers, which will fail
406 * because the 64-bit data lines aren't connected.
408 * There's no way to work around this (short of talking a soldering
409 * iron to the board), however we can detect it. The method we use
410 * here is to put the NIC into digital loopback mode, set the receiver
411 * to promiscuous mode, and then try to send a frame. We then compare
412 * the frame data we sent to what was received. If the data matches,
413 * then the NIC is working correctly, otherwise we know the user has
414 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
415 * slot. In the latter case, there's no way the NIC can work correctly,
416 * so we print out a message on the console and abort the device attach.
420 re_diag(struct rtk_softc
*sc
)
422 struct ifnet
*ifp
= &sc
->ethercom
.ec_if
;
424 struct ether_header
*eh
;
425 struct re_rxsoft
*rxs
;
426 struct re_desc
*cur_rx
;
430 int total_len
, i
, s
, error
= 0;
431 static const uint8_t dst
[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
432 static const uint8_t src
[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
434 /* Allocate a single mbuf */
436 MGETHDR(m0
, M_DONTWAIT
, MT_DATA
);
441 * Initialize the NIC in test mode. This sets the chip up
442 * so that it can send and receive frames, but performs the
443 * following special functions:
444 * - Puts receiver in promiscuous mode
445 * - Enables digital loopback mode
446 * - Leaves interrupts turned off
449 ifp
->if_flags
|= IFF_PROMISC
;
456 /* Put some data in the mbuf */
458 eh
= mtod(m0
, struct ether_header
*);
459 memcpy(eh
->ether_dhost
, (char *)&dst
, ETHER_ADDR_LEN
);
460 memcpy(eh
->ether_shost
, (char *)&src
, ETHER_ADDR_LEN
);
461 eh
->ether_type
= htons(ETHERTYPE_IP
);
462 m0
->m_pkthdr
.len
= m0
->m_len
= ETHER_MIN_LEN
- ETHER_CRC_LEN
;
465 * Queue the packet, start transmission.
468 CSR_WRITE_2(sc
, RTK_ISR
, 0xFFFF);
470 IF_ENQUEUE(&ifp
->if_snd
, m0
);
475 /* Wait for it to propagate through the chip */
478 for (i
= 0; i
< RTK_TIMEOUT
; i
++) {
479 status
= CSR_READ_2(sc
, RTK_ISR
);
480 if ((status
& (RTK_ISR_TIMEOUT_EXPIRED
| RTK_ISR_RX_OK
)) ==
481 (RTK_ISR_TIMEOUT_EXPIRED
| RTK_ISR_RX_OK
))
485 if (i
== RTK_TIMEOUT
) {
486 aprint_error_dev(sc
->sc_dev
,
487 "diagnostic failed, failed to receive packet "
488 "in loopback mode\n");
494 * The packet should have been dumped into the first
495 * entry in the RX DMA ring. Grab it from there.
498 rxs
= &sc
->re_ldata
.re_rxsoft
[0];
499 dmamap
= rxs
->rxs_dmamap
;
500 bus_dmamap_sync(sc
->sc_dmat
, dmamap
, 0, dmamap
->dm_mapsize
,
501 BUS_DMASYNC_POSTREAD
);
502 bus_dmamap_unload(sc
->sc_dmat
, dmamap
);
505 rxs
->rxs_mbuf
= NULL
;
506 eh
= mtod(m0
, struct ether_header
*);
508 RE_RXDESCSYNC(sc
, 0, BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
509 cur_rx
= &sc
->re_ldata
.re_rx_list
[0];
510 rxstat
= le32toh(cur_rx
->re_cmdstat
);
511 total_len
= rxstat
& sc
->re_rxlenmask
;
513 if (total_len
!= ETHER_MIN_LEN
) {
514 aprint_error_dev(sc
->sc_dev
,
515 "diagnostic failed, received short packet\n");
520 /* Test that the received packet data matches what we sent. */
522 if (memcmp((char *)&eh
->ether_dhost
, (char *)&dst
, ETHER_ADDR_LEN
) ||
523 memcmp((char *)&eh
->ether_shost
, (char *)&src
, ETHER_ADDR_LEN
) ||
524 ntohs(eh
->ether_type
) != ETHERTYPE_IP
) {
525 aprint_error_dev(sc
->sc_dev
, "WARNING, DMA FAILURE!\n"
526 "expected TX data: %s/%s/0x%x\n"
527 "received RX data: %s/%s/0x%x\n"
528 "You may have a defective 32-bit NIC plugged "
529 "into a 64-bit PCI slot.\n"
530 "Please re-install the NIC in a 32-bit slot "
531 "for proper operation.\n"
532 "Read the re(4) man page for more details.\n" ,
533 ether_sprintf(dst
), ether_sprintf(src
), ETHERTYPE_IP
,
534 ether_sprintf(eh
->ether_dhost
),
535 ether_sprintf(eh
->ether_shost
), ntohs(eh
->ether_type
));
540 /* Turn interface off, release resources */
543 ifp
->if_flags
&= ~IFF_PROMISC
;
553 * Attach the interface. Allocate softc structures, do ifmedia
554 * setup and ethernet/BPF attach.
557 re_attach(struct rtk_softc
*sc
)
559 uint8_t eaddr
[ETHER_ADDR_LEN
];
562 int error
= 0, i
, addr_len
;
564 if ((sc
->sc_quirk
& RTKQ_8139CPLUS
) == 0) {
567 /* Revision of 8169/8169S/8110s in bits 30..26, 23 */
568 hwrev
= CSR_READ_4(sc
, RTK_TXCFG
) & RTK_TXCFG_HWREV
;
571 sc
->sc_quirk
|= RTKQ_8169NONS
;
573 case RTK_HWREV_8169S
:
574 case RTK_HWREV_8110S
:
575 case RTK_HWREV_8169_8110SB
:
576 case RTK_HWREV_8169_8110SC
:
577 sc
->sc_quirk
|= RTKQ_MACLDPS
;
579 case RTK_HWREV_8168_SPIN1
:
580 case RTK_HWREV_8168_SPIN2
:
581 case RTK_HWREV_8168_SPIN3
:
582 sc
->sc_quirk
|= RTKQ_MACSTAT
;
584 case RTK_HWREV_8168C
:
585 case RTK_HWREV_8168C_SPIN2
:
586 case RTK_HWREV_8168CP
:
587 case RTK_HWREV_8168D
:
588 case RTK_HWREV_8168DP
:
589 sc
->sc_quirk
|= RTKQ_DESCV2
| RTKQ_NOEECMD
|
590 RTKQ_MACSTAT
| RTKQ_CMDSTOP
;
592 * From FreeBSD driver:
594 * These (8168/8111) controllers support jumbo frame
595 * but it seems that enabling it requires touching
596 * additional magic registers. Depending on MAC
597 * revisions some controllers need to disable
598 * checksum offload. So disable jumbo frame until
599 * I have better idea what it really requires to
601 * RTL8168C/CP : supports up to 6KB jumbo frame.
602 * RTL8111C/CP : supports up to 9KB jumbo frame.
604 sc
->sc_quirk
|= RTKQ_NOJUMBO
;
606 case RTK_HWREV_8100E
:
607 case RTK_HWREV_8100E_SPIN2
:
608 case RTK_HWREV_8101E
:
609 sc
->sc_quirk
|= RTKQ_NOJUMBO
;
611 case RTK_HWREV_8102E
:
612 case RTK_HWREV_8102EL
:
613 case RTK_HWREV_8103E
:
614 sc
->sc_quirk
|= RTKQ_DESCV2
| RTKQ_NOEECMD
|
615 RTKQ_MACSTAT
| RTKQ_CMDSTOP
| RTKQ_NOJUMBO
;
618 aprint_normal_dev(sc
->sc_dev
,
619 "Unknown revision (0x%08x)\n", hwrev
);
620 /* assume the latest features */
621 sc
->sc_quirk
|= RTKQ_DESCV2
| RTKQ_NOEECMD
;
622 sc
->sc_quirk
|= RTKQ_NOJUMBO
;
625 /* Set RX length mask */
626 sc
->re_rxlenmask
= RE_RDESC_STAT_GFRAGLEN
;
627 sc
->re_ldata
.re_tx_desc_cnt
= RE_TX_DESC_CNT_8169
;
629 sc
->sc_quirk
|= RTKQ_NOJUMBO
;
631 /* Set RX length mask */
632 sc
->re_rxlenmask
= RE_RDESC_STAT_FRAGLEN
;
633 sc
->re_ldata
.re_tx_desc_cnt
= RE_TX_DESC_CNT_8139
;
636 /* Reset the adapter. */
639 if ((sc
->sc_quirk
& RTKQ_NOEECMD
) != 0) {
641 * Get station address from ID registers.
643 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++)
644 eaddr
[i
] = CSR_READ_1(sc
, RTK_IDR0
+ i
);
647 * Get station address from the EEPROM.
649 if (rtk_read_eeprom(sc
, RTK_EE_ID
, RTK_EEADDR_LEN1
) == 0x8129)
650 addr_len
= RTK_EEADDR_LEN1
;
652 addr_len
= RTK_EEADDR_LEN0
;
655 * Get station address from the EEPROM.
657 for (i
= 0; i
< ETHER_ADDR_LEN
/ 2; i
++) {
658 val
= rtk_read_eeprom(sc
, RTK_EE_EADDR0
+ i
, addr_len
);
659 eaddr
[(i
* 2) + 0] = val
& 0xff;
660 eaddr
[(i
* 2) + 1] = val
>> 8;
664 aprint_normal_dev(sc
->sc_dev
, "Ethernet address %s\n",
665 ether_sprintf(eaddr
));
667 if (sc
->re_ldata
.re_tx_desc_cnt
>
668 PAGE_SIZE
/ sizeof(struct re_desc
)) {
669 sc
->re_ldata
.re_tx_desc_cnt
=
670 PAGE_SIZE
/ sizeof(struct re_desc
);
673 aprint_verbose_dev(sc
->sc_dev
, "using %d tx descriptors\n",
674 sc
->re_ldata
.re_tx_desc_cnt
);
675 KASSERT(RE_NEXT_TX_DESC(sc
, RE_TX_DESC_CNT(sc
) - 1) == 0);
677 /* Allocate DMA'able memory for the TX ring */
678 if ((error
= bus_dmamem_alloc(sc
->sc_dmat
, RE_TX_LIST_SZ(sc
),
679 RE_RING_ALIGN
, 0, &sc
->re_ldata
.re_tx_listseg
, 1,
680 &sc
->re_ldata
.re_tx_listnseg
, BUS_DMA_NOWAIT
)) != 0) {
681 aprint_error_dev(sc
->sc_dev
,
682 "can't allocate tx listseg, error = %d\n", error
);
686 /* Load the map for the TX ring. */
687 if ((error
= bus_dmamem_map(sc
->sc_dmat
, &sc
->re_ldata
.re_tx_listseg
,
688 sc
->re_ldata
.re_tx_listnseg
, RE_TX_LIST_SZ(sc
),
689 (void **)&sc
->re_ldata
.re_tx_list
,
690 BUS_DMA_COHERENT
| BUS_DMA_NOWAIT
)) != 0) {
691 aprint_error_dev(sc
->sc_dev
,
692 "can't map tx list, error = %d\n", error
);
695 memset(sc
->re_ldata
.re_tx_list
, 0, RE_TX_LIST_SZ(sc
));
697 if ((error
= bus_dmamap_create(sc
->sc_dmat
, RE_TX_LIST_SZ(sc
), 1,
698 RE_TX_LIST_SZ(sc
), 0, 0,
699 &sc
->re_ldata
.re_tx_list_map
)) != 0) {
700 aprint_error_dev(sc
->sc_dev
,
701 "can't create tx list map, error = %d\n", error
);
706 if ((error
= bus_dmamap_load(sc
->sc_dmat
,
707 sc
->re_ldata
.re_tx_list_map
, sc
->re_ldata
.re_tx_list
,
708 RE_TX_LIST_SZ(sc
), NULL
, BUS_DMA_NOWAIT
)) != 0) {
709 aprint_error_dev(sc
->sc_dev
,
710 "can't load tx list, error = %d\n", error
);
714 /* Create DMA maps for TX buffers */
715 for (i
= 0; i
< RE_TX_QLEN
; i
++) {
716 error
= bus_dmamap_create(sc
->sc_dmat
,
717 round_page(IP_MAXPACKET
),
718 RE_TX_DESC_CNT(sc
), RE_TDESC_CMD_FRAGLEN
,
719 0, 0, &sc
->re_ldata
.re_txq
[i
].txq_dmamap
);
721 aprint_error_dev(sc
->sc_dev
,
722 "can't create DMA map for TX\n");
727 /* Allocate DMA'able memory for the RX ring */
728 /* XXX see also a comment about RE_RX_DMAMEM_SZ in rtl81x9var.h */
729 if ((error
= bus_dmamem_alloc(sc
->sc_dmat
,
730 RE_RX_DMAMEM_SZ
, RE_RING_ALIGN
, 0, &sc
->re_ldata
.re_rx_listseg
, 1,
731 &sc
->re_ldata
.re_rx_listnseg
, BUS_DMA_NOWAIT
)) != 0) {
732 aprint_error_dev(sc
->sc_dev
,
733 "can't allocate rx listseg, error = %d\n", error
);
737 /* Load the map for the RX ring. */
738 if ((error
= bus_dmamem_map(sc
->sc_dmat
, &sc
->re_ldata
.re_rx_listseg
,
739 sc
->re_ldata
.re_rx_listnseg
, RE_RX_DMAMEM_SZ
,
740 (void **)&sc
->re_ldata
.re_rx_list
,
741 BUS_DMA_COHERENT
| BUS_DMA_NOWAIT
)) != 0) {
742 aprint_error_dev(sc
->sc_dev
,
743 "can't map rx list, error = %d\n", error
);
746 memset(sc
->re_ldata
.re_rx_list
, 0, RE_RX_DMAMEM_SZ
);
748 if ((error
= bus_dmamap_create(sc
->sc_dmat
,
749 RE_RX_DMAMEM_SZ
, 1, RE_RX_DMAMEM_SZ
, 0, 0,
750 &sc
->re_ldata
.re_rx_list_map
)) != 0) {
751 aprint_error_dev(sc
->sc_dev
,
752 "can't create rx list map, error = %d\n", error
);
756 if ((error
= bus_dmamap_load(sc
->sc_dmat
,
757 sc
->re_ldata
.re_rx_list_map
, sc
->re_ldata
.re_rx_list
,
758 RE_RX_DMAMEM_SZ
, NULL
, BUS_DMA_NOWAIT
)) != 0) {
759 aprint_error_dev(sc
->sc_dev
,
760 "can't load rx list, error = %d\n", error
);
764 /* Create DMA maps for RX buffers */
765 for (i
= 0; i
< RE_RX_DESC_CNT
; i
++) {
766 error
= bus_dmamap_create(sc
->sc_dmat
, MCLBYTES
, 1, MCLBYTES
,
767 0, 0, &sc
->re_ldata
.re_rxsoft
[i
].rxs_dmamap
);
769 aprint_error_dev(sc
->sc_dev
,
770 "can't create DMA map for RX\n");
776 * Record interface as attached. From here, we should not fail.
778 sc
->sc_flags
|= RTK_ATTACHED
;
780 ifp
= &sc
->ethercom
.ec_if
;
782 strlcpy(ifp
->if_xname
, device_xname(sc
->sc_dev
), IFNAMSIZ
);
783 ifp
->if_mtu
= ETHERMTU
;
784 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
785 ifp
->if_ioctl
= re_ioctl
;
786 sc
->ethercom
.ec_capabilities
|=
787 ETHERCAP_VLAN_MTU
| ETHERCAP_VLAN_HWTAGGING
;
788 ifp
->if_start
= re_start
;
789 ifp
->if_stop
= re_stop
;
792 * IFCAP_CSUM_IPv4_Tx on re(4) is broken for small packets,
793 * so we have a workaround to handle the bug by padding
794 * such packets manually.
796 ifp
->if_capabilities
|=
797 IFCAP_CSUM_IPv4_Tx
| IFCAP_CSUM_IPv4_Rx
|
798 IFCAP_CSUM_TCPv4_Tx
| IFCAP_CSUM_TCPv4_Rx
|
799 IFCAP_CSUM_UDPv4_Tx
| IFCAP_CSUM_UDPv4_Rx
|
804 * Still have no idea how to make TSO work on 8168C, 8168CP,
805 * 8102E, 8111C and 8111CP.
807 if ((sc
->sc_quirk
& RTKQ_DESCV2
) != 0)
808 ifp
->if_capabilities
&= ~IFCAP_TSOv4
;
810 ifp
->if_watchdog
= re_watchdog
;
811 ifp
->if_init
= re_init
;
812 ifp
->if_snd
.ifq_maxlen
= RE_IFQ_MAXLEN
;
813 ifp
->if_capenable
= ifp
->if_capabilities
;
814 IFQ_SET_READY(&ifp
->if_snd
);
816 callout_init(&sc
->rtk_tick_ch
, 0);
819 sc
->mii
.mii_ifp
= ifp
;
820 sc
->mii
.mii_readreg
= re_miibus_readreg
;
821 sc
->mii
.mii_writereg
= re_miibus_writereg
;
822 sc
->mii
.mii_statchg
= re_miibus_statchg
;
823 sc
->ethercom
.ec_mii
= &sc
->mii
;
824 ifmedia_init(&sc
->mii
.mii_media
, IFM_IMASK
, ether_mediachange
,
826 mii_attach(sc
->sc_dev
, &sc
->mii
, 0xffffffff, MII_PHY_ANY
,
828 ifmedia_set(&sc
->mii
.mii_media
, IFM_ETHER
| IFM_AUTO
);
831 * Call MI attach routine.
834 ether_ifattach(ifp
, eaddr
);
836 if (pmf_device_register(sc
->sc_dev
, NULL
, NULL
))
837 pmf_class_network_register(sc
->sc_dev
, ifp
);
839 aprint_error_dev(sc
->sc_dev
,
840 "couldn't establish power handler\n");
845 /* Destroy DMA maps for RX buffers. */
846 for (i
= 0; i
< RE_RX_DESC_CNT
; i
++)
847 if (sc
->re_ldata
.re_rxsoft
[i
].rxs_dmamap
!= NULL
)
848 bus_dmamap_destroy(sc
->sc_dmat
,
849 sc
->re_ldata
.re_rxsoft
[i
].rxs_dmamap
);
851 /* Free DMA'able memory for the RX ring. */
852 bus_dmamap_unload(sc
->sc_dmat
, sc
->re_ldata
.re_rx_list_map
);
854 bus_dmamap_destroy(sc
->sc_dmat
, sc
->re_ldata
.re_rx_list_map
);
856 bus_dmamem_unmap(sc
->sc_dmat
,
857 (void *)sc
->re_ldata
.re_rx_list
, RE_RX_DMAMEM_SZ
);
859 bus_dmamem_free(sc
->sc_dmat
,
860 &sc
->re_ldata
.re_rx_listseg
, sc
->re_ldata
.re_rx_listnseg
);
863 /* Destroy DMA maps for TX buffers. */
864 for (i
= 0; i
< RE_TX_QLEN
; i
++)
865 if (sc
->re_ldata
.re_txq
[i
].txq_dmamap
!= NULL
)
866 bus_dmamap_destroy(sc
->sc_dmat
,
867 sc
->re_ldata
.re_txq
[i
].txq_dmamap
);
869 /* Free DMA'able memory for the TX ring. */
870 bus_dmamap_unload(sc
->sc_dmat
, sc
->re_ldata
.re_tx_list_map
);
872 bus_dmamap_destroy(sc
->sc_dmat
, sc
->re_ldata
.re_tx_list_map
);
874 bus_dmamem_unmap(sc
->sc_dmat
,
875 (void *)sc
->re_ldata
.re_tx_list
, RE_TX_LIST_SZ(sc
));
877 bus_dmamem_free(sc
->sc_dmat
,
878 &sc
->re_ldata
.re_tx_listseg
, sc
->re_ldata
.re_tx_listnseg
);
886 * Handle device activation/deactivation requests.
889 re_activate(device_t self
, enum devact act
)
891 struct rtk_softc
*sc
= device_private(self
);
894 case DVACT_DEACTIVATE
:
895 if_deactivate(&sc
->ethercom
.ec_if
);
904 * Detach a rtk interface.
907 re_detach(struct rtk_softc
*sc
)
909 struct ifnet
*ifp
= &sc
->ethercom
.ec_if
;
913 * Succeed now if there isn't any work to do.
915 if ((sc
->sc_flags
& RTK_ATTACHED
) == 0)
918 /* Unhook our tick handler. */
919 callout_stop(&sc
->rtk_tick_ch
);
921 /* Detach all PHYs. */
922 mii_detach(&sc
->mii
, MII_PHY_ANY
, MII_OFFSET_ANY
);
924 /* Delete all remaining media. */
925 ifmedia_delete_instance(&sc
->mii
.mii_media
, IFM_INST_ANY
);
930 /* Destroy DMA maps for RX buffers. */
931 for (i
= 0; i
< RE_RX_DESC_CNT
; i
++)
932 if (sc
->re_ldata
.re_rxsoft
[i
].rxs_dmamap
!= NULL
)
933 bus_dmamap_destroy(sc
->sc_dmat
,
934 sc
->re_ldata
.re_rxsoft
[i
].rxs_dmamap
);
936 /* Free DMA'able memory for the RX ring. */
937 bus_dmamap_unload(sc
->sc_dmat
, sc
->re_ldata
.re_rx_list_map
);
938 bus_dmamap_destroy(sc
->sc_dmat
, sc
->re_ldata
.re_rx_list_map
);
939 bus_dmamem_unmap(sc
->sc_dmat
,
940 (void *)sc
->re_ldata
.re_rx_list
, RE_RX_DMAMEM_SZ
);
941 bus_dmamem_free(sc
->sc_dmat
,
942 &sc
->re_ldata
.re_rx_listseg
, sc
->re_ldata
.re_rx_listnseg
);
944 /* Destroy DMA maps for TX buffers. */
945 for (i
= 0; i
< RE_TX_QLEN
; i
++)
946 if (sc
->re_ldata
.re_txq
[i
].txq_dmamap
!= NULL
)
947 bus_dmamap_destroy(sc
->sc_dmat
,
948 sc
->re_ldata
.re_txq
[i
].txq_dmamap
);
950 /* Free DMA'able memory for the TX ring. */
951 bus_dmamap_unload(sc
->sc_dmat
, sc
->re_ldata
.re_tx_list_map
);
952 bus_dmamap_destroy(sc
->sc_dmat
, sc
->re_ldata
.re_tx_list_map
);
953 bus_dmamem_unmap(sc
->sc_dmat
,
954 (void *)sc
->re_ldata
.re_tx_list
, RE_TX_LIST_SZ(sc
));
955 bus_dmamem_free(sc
->sc_dmat
,
956 &sc
->re_ldata
.re_tx_listseg
, sc
->re_ldata
.re_tx_listnseg
);
958 pmf_device_deregister(sc
->sc_dev
);
965 * Enable the RTL81X9 chip.
968 re_enable(struct rtk_softc
*sc
)
971 if (RTK_IS_ENABLED(sc
) == 0 && sc
->sc_enable
!= NULL
) {
972 if ((*sc
->sc_enable
)(sc
) != 0) {
973 printf("%s: device enable failed\n",
974 device_xname(sc
->sc_dev
));
977 sc
->sc_flags
|= RTK_ENABLED
;
984 * Disable the RTL81X9 chip.
987 re_disable(struct rtk_softc
*sc
)
990 if (RTK_IS_ENABLED(sc
) && sc
->sc_disable
!= NULL
) {
991 (*sc
->sc_disable
)(sc
);
992 sc
->sc_flags
&= ~RTK_ENABLED
;
997 re_newbuf(struct rtk_softc
*sc
, int idx
, struct mbuf
*m
)
999 struct mbuf
*n
= NULL
;
1002 struct re_rxsoft
*rxs
;
1007 MGETHDR(n
, M_DONTWAIT
, MT_DATA
);
1011 MCLGET(n
, M_DONTWAIT
);
1012 if ((n
->m_flags
& M_EXT
) == 0) {
1018 m
->m_data
= m
->m_ext
.ext_buf
;
1021 * Initialize mbuf length fields and fixup
1022 * alignment so that the frame payload is
1025 m
->m_len
= m
->m_pkthdr
.len
= MCLBYTES
- RE_ETHER_ALIGN
;
1026 m
->m_data
+= RE_ETHER_ALIGN
;
1028 rxs
= &sc
->re_ldata
.re_rxsoft
[idx
];
1029 map
= rxs
->rxs_dmamap
;
1030 error
= bus_dmamap_load_mbuf(sc
->sc_dmat
, map
, m
,
1031 BUS_DMA_READ
|BUS_DMA_NOWAIT
);
1036 bus_dmamap_sync(sc
->sc_dmat
, map
, 0, map
->dm_mapsize
,
1037 BUS_DMASYNC_PREREAD
);
1039 d
= &sc
->re_ldata
.re_rx_list
[idx
];
1041 RE_RXDESCSYNC(sc
, idx
, BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
1042 cmdstat
= le32toh(d
->re_cmdstat
);
1043 RE_RXDESCSYNC(sc
, idx
, BUS_DMASYNC_PREREAD
);
1044 if (cmdstat
& RE_RDESC_STAT_OWN
) {
1045 panic("%s: tried to map busy RX descriptor",
1046 device_xname(sc
->sc_dev
));
1053 cmdstat
= map
->dm_segs
[0].ds_len
;
1054 if (idx
== (RE_RX_DESC_CNT
- 1))
1055 cmdstat
|= RE_RDESC_CMD_EOR
;
1056 re_set_bufaddr(d
, map
->dm_segs
[0].ds_addr
);
1057 d
->re_cmdstat
= htole32(cmdstat
);
1058 RE_RXDESCSYNC(sc
, idx
, BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
1059 cmdstat
|= RE_RDESC_CMD_OWN
;
1060 d
->re_cmdstat
= htole32(cmdstat
);
1061 RE_RXDESCSYNC(sc
, idx
, BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
1071 re_tx_list_init(struct rtk_softc
*sc
)
1075 memset(sc
->re_ldata
.re_tx_list
, 0, RE_TX_LIST_SZ(sc
));
1076 for (i
= 0; i
< RE_TX_QLEN
; i
++) {
1077 sc
->re_ldata
.re_txq
[i
].txq_mbuf
= NULL
;
1080 bus_dmamap_sync(sc
->sc_dmat
,
1081 sc
->re_ldata
.re_tx_list_map
, 0,
1082 sc
->re_ldata
.re_tx_list_map
->dm_mapsize
,
1083 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
1084 sc
->re_ldata
.re_txq_prodidx
= 0;
1085 sc
->re_ldata
.re_txq_considx
= 0;
1086 sc
->re_ldata
.re_txq_free
= RE_TX_QLEN
;
1087 sc
->re_ldata
.re_tx_free
= RE_TX_DESC_CNT(sc
);
1088 sc
->re_ldata
.re_tx_nextfree
= 0;
1094 re_rx_list_init(struct rtk_softc
*sc
)
1098 memset(sc
->re_ldata
.re_rx_list
, 0, RE_RX_LIST_SZ
);
1100 for (i
= 0; i
< RE_RX_DESC_CNT
; i
++) {
1101 if (re_newbuf(sc
, i
, NULL
) == ENOBUFS
)
1105 sc
->re_ldata
.re_rx_prodidx
= 0;
1106 sc
->re_head
= sc
->re_tail
= NULL
;
1112 * RX handler for C+ and 8169. For the gigE chips, we support
1113 * the reception of jumbo frames that have been fragmented
1114 * across multiple 2K mbuf cluster buffers.
1117 re_rxeof(struct rtk_softc
*sc
)
1122 struct re_desc
*cur_rx
;
1123 struct re_rxsoft
*rxs
;
1124 uint32_t rxstat
, rxvlan
;
1126 ifp
= &sc
->ethercom
.ec_if
;
1128 for (i
= sc
->re_ldata
.re_rx_prodidx
;; i
= RE_NEXT_RX_DESC(sc
, i
)) {
1129 cur_rx
= &sc
->re_ldata
.re_rx_list
[i
];
1130 RE_RXDESCSYNC(sc
, i
,
1131 BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
1132 rxstat
= le32toh(cur_rx
->re_cmdstat
);
1133 rxvlan
= le32toh(cur_rx
->re_vlanctl
);
1134 RE_RXDESCSYNC(sc
, i
, BUS_DMASYNC_PREREAD
);
1135 if ((rxstat
& RE_RDESC_STAT_OWN
) != 0) {
1138 total_len
= rxstat
& sc
->re_rxlenmask
;
1139 rxs
= &sc
->re_ldata
.re_rxsoft
[i
];
1142 /* Invalidate the RX mbuf and unload its map */
1144 bus_dmamap_sync(sc
->sc_dmat
,
1145 rxs
->rxs_dmamap
, 0, rxs
->rxs_dmamap
->dm_mapsize
,
1146 BUS_DMASYNC_POSTREAD
);
1147 bus_dmamap_unload(sc
->sc_dmat
, rxs
->rxs_dmamap
);
1149 if ((rxstat
& RE_RDESC_STAT_EOF
) == 0) {
1150 m
->m_len
= MCLBYTES
- RE_ETHER_ALIGN
;
1151 if (sc
->re_head
== NULL
)
1152 sc
->re_head
= sc
->re_tail
= m
;
1154 m
->m_flags
&= ~M_PKTHDR
;
1155 sc
->re_tail
->m_next
= m
;
1158 re_newbuf(sc
, i
, NULL
);
1163 * NOTE: for the 8139C+, the frame length field
1164 * is always 12 bits in size, but for the gigE chips,
1165 * it is 13 bits (since the max RX frame length is 16K).
1166 * Unfortunately, all 32 bits in the status word
1167 * were already used, so to make room for the extra
1168 * length bit, RealTek took out the 'frame alignment
1169 * error' bit and shifted the other status bits
1170 * over one slot. The OWN, EOR, FS and LS bits are
1171 * still in the same places. We have already extracted
1172 * the frame length and checked the OWN bit, so rather
1173 * than using an alternate bit mapping, we shift the
1174 * status bits one space to the right so we can evaluate
1175 * them using the 8169 status as though it was in the
1176 * same format as that of the 8139C+.
1178 if ((sc
->sc_quirk
& RTKQ_8139CPLUS
) == 0)
1181 if (__predict_false((rxstat
& RE_RDESC_STAT_RXERRSUM
) != 0)) {
1183 printf("%s: RX error (rxstat = 0x%08x)",
1184 device_xname(sc
->sc_dev
), rxstat
);
1185 if (rxstat
& RE_RDESC_STAT_FRALIGN
)
1186 printf(", frame alignment error");
1187 if (rxstat
& RE_RDESC_STAT_BUFOFLOW
)
1188 printf(", out of buffer space");
1189 if (rxstat
& RE_RDESC_STAT_FIFOOFLOW
)
1190 printf(", FIFO overrun");
1191 if (rxstat
& RE_RDESC_STAT_GIANT
)
1192 printf(", giant packet");
1193 if (rxstat
& RE_RDESC_STAT_RUNT
)
1194 printf(", runt packet");
1195 if (rxstat
& RE_RDESC_STAT_CRCERR
)
1196 printf(", CRC error");
1201 * If this is part of a multi-fragment packet,
1202 * discard all the pieces.
1204 if (sc
->re_head
!= NULL
) {
1205 m_freem(sc
->re_head
);
1206 sc
->re_head
= sc
->re_tail
= NULL
;
1208 re_newbuf(sc
, i
, m
);
1213 * If allocating a replacement mbuf fails,
1214 * reload the current one.
1217 if (__predict_false(re_newbuf(sc
, i
, NULL
) != 0)) {
1219 if (sc
->re_head
!= NULL
) {
1220 m_freem(sc
->re_head
);
1221 sc
->re_head
= sc
->re_tail
= NULL
;
1223 re_newbuf(sc
, i
, m
);
1227 if (sc
->re_head
!= NULL
) {
1228 m
->m_len
= total_len
% (MCLBYTES
- RE_ETHER_ALIGN
);
1230 * Special case: if there's 4 bytes or less
1231 * in this buffer, the mbuf can be discarded:
1232 * the last 4 bytes is the CRC, which we don't
1233 * care about anyway.
1235 if (m
->m_len
<= ETHER_CRC_LEN
) {
1236 sc
->re_tail
->m_len
-=
1237 (ETHER_CRC_LEN
- m
->m_len
);
1240 m
->m_len
-= ETHER_CRC_LEN
;
1241 m
->m_flags
&= ~M_PKTHDR
;
1242 sc
->re_tail
->m_next
= m
;
1245 sc
->re_head
= sc
->re_tail
= NULL
;
1246 m
->m_pkthdr
.len
= total_len
- ETHER_CRC_LEN
;
1248 m
->m_pkthdr
.len
= m
->m_len
=
1249 (total_len
- ETHER_CRC_LEN
);
1252 m
->m_pkthdr
.rcvif
= ifp
;
1254 /* Do RX checksumming */
1255 if ((sc
->sc_quirk
& RTKQ_DESCV2
) == 0) {
1256 /* Check IP header checksum */
1257 if ((rxstat
& RE_RDESC_STAT_PROTOID
) != 0) {
1258 m
->m_pkthdr
.csum_flags
|= M_CSUM_IPv4
;
1259 if (rxstat
& RE_RDESC_STAT_IPSUMBAD
)
1260 m
->m_pkthdr
.csum_flags
|=
1263 /* Check TCP/UDP checksum */
1264 if (RE_TCPPKT(rxstat
)) {
1265 m
->m_pkthdr
.csum_flags
|= M_CSUM_TCPv4
;
1266 if (rxstat
& RE_RDESC_STAT_TCPSUMBAD
)
1267 m
->m_pkthdr
.csum_flags
|=
1269 } else if (RE_UDPPKT(rxstat
)) {
1270 m
->m_pkthdr
.csum_flags
|= M_CSUM_UDPv4
;
1271 if (rxstat
& RE_RDESC_STAT_UDPSUMBAD
)
1272 m
->m_pkthdr
.csum_flags
|=
1277 /* Check IPv4 header checksum */
1278 if ((rxvlan
& RE_RDESC_VLANCTL_IPV4
) != 0) {
1279 m
->m_pkthdr
.csum_flags
|= M_CSUM_IPv4
;
1280 if (rxstat
& RE_RDESC_STAT_IPSUMBAD
)
1281 m
->m_pkthdr
.csum_flags
|=
1284 /* Check TCPv4/UDPv4 checksum */
1285 if (RE_TCPPKT(rxstat
)) {
1286 m
->m_pkthdr
.csum_flags
|= M_CSUM_TCPv4
;
1287 if (rxstat
& RE_RDESC_STAT_TCPSUMBAD
)
1288 m
->m_pkthdr
.csum_flags
|=
1290 } else if (RE_UDPPKT(rxstat
)) {
1291 m
->m_pkthdr
.csum_flags
|= M_CSUM_UDPv4
;
1292 if (rxstat
& RE_RDESC_STAT_UDPSUMBAD
)
1293 m
->m_pkthdr
.csum_flags
|=
1297 /* XXX Check TCPv6/UDPv6 checksum? */
1300 if (rxvlan
& RE_RDESC_VLANCTL_TAG
) {
1301 VLAN_INPUT_TAG(ifp
, m
,
1302 bswap16(rxvlan
& RE_RDESC_VLANCTL_DATA
),
1307 bpf_mtap(ifp
->if_bpf
, m
);
1309 (*ifp
->if_input
)(ifp
, m
);
1312 sc
->re_ldata
.re_rx_prodidx
= i
;
1316 re_txeof(struct rtk_softc
*sc
)
1323 ifp
= &sc
->ethercom
.ec_if
;
1325 for (idx
= sc
->re_ldata
.re_txq_considx
;
1326 sc
->re_ldata
.re_txq_free
< RE_TX_QLEN
;
1327 idx
= RE_NEXT_TXQ(sc
, idx
), sc
->re_ldata
.re_txq_free
++) {
1328 txq
= &sc
->re_ldata
.re_txq
[idx
];
1329 KASSERT(txq
->txq_mbuf
!= NULL
);
1331 descidx
= txq
->txq_descidx
;
1332 RE_TXDESCSYNC(sc
, descidx
,
1333 BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
1335 le32toh(sc
->re_ldata
.re_tx_list
[descidx
].re_cmdstat
);
1336 RE_TXDESCSYNC(sc
, descidx
, BUS_DMASYNC_PREREAD
);
1337 KASSERT((txstat
& RE_TDESC_CMD_EOF
) != 0);
1338 if (txstat
& RE_TDESC_CMD_OWN
) {
1342 sc
->re_ldata
.re_tx_free
+= txq
->txq_nsegs
;
1343 KASSERT(sc
->re_ldata
.re_tx_free
<= RE_TX_DESC_CNT(sc
));
1344 bus_dmamap_sync(sc
->sc_dmat
, txq
->txq_dmamap
,
1345 0, txq
->txq_dmamap
->dm_mapsize
, BUS_DMASYNC_POSTWRITE
);
1346 bus_dmamap_unload(sc
->sc_dmat
, txq
->txq_dmamap
);
1347 m_freem(txq
->txq_mbuf
);
1348 txq
->txq_mbuf
= NULL
;
1350 if (txstat
& (RE_TDESC_STAT_EXCESSCOL
| RE_TDESC_STAT_COLCNT
))
1351 ifp
->if_collisions
++;
1352 if (txstat
& RE_TDESC_STAT_TXERRSUM
)
1358 sc
->re_ldata
.re_txq_considx
= idx
;
1360 if (sc
->re_ldata
.re_txq_free
> RE_NTXDESC_RSVD
)
1361 ifp
->if_flags
&= ~IFF_OACTIVE
;
1364 * If not all descriptors have been reaped yet,
1365 * reload the timer so that we will eventually get another
1366 * interrupt that will cause us to re-enter this routine.
1367 * This is done in case the transmitter has gone idle.
1369 if (sc
->re_ldata
.re_txq_free
< RE_TX_QLEN
) {
1370 CSR_WRITE_4(sc
, RTK_TIMERCNT
, 1);
1371 if ((sc
->sc_quirk
& RTKQ_PCIE
) != 0) {
1373 * Some chips will ignore a second TX request
1374 * issued while an existing transmission is in
1375 * progress. If the transmitter goes idle but
1376 * there are still packets waiting to be sent,
1377 * we need to restart the channel here to flush
1378 * them out. This only seems to be required with
1381 CSR_WRITE_1(sc
, RTK_GTXSTART
, RTK_TXSTART_START
);
1390 struct rtk_softc
*sc
= arg
;
1393 /* XXX: just return for 8169S/8110S with rev 2 or newer phy */
1399 callout_reset(&sc
->rtk_tick_ch
, hz
, re_tick
, sc
);
1405 struct rtk_softc
*sc
= arg
;
1410 if (!device_has_power(sc
->sc_dev
))
1413 ifp
= &sc
->ethercom
.ec_if
;
1415 if ((ifp
->if_flags
& IFF_UP
) == 0)
1420 status
= CSR_READ_2(sc
, RTK_ISR
);
1421 /* If the card has gone away the read returns 0xffff. */
1422 if (status
== 0xffff)
1426 CSR_WRITE_2(sc
, RTK_ISR
, status
);
1429 if ((status
& RTK_INTRS_CPLUS
) == 0)
1432 if (status
& (RTK_ISR_RX_OK
| RTK_ISR_RX_ERR
))
1435 if (status
& (RTK_ISR_TIMEOUT_EXPIRED
| RTK_ISR_TX_ERR
|
1436 RTK_ISR_TX_DESC_UNAVAIL
))
1439 if (status
& RTK_ISR_SYSTEM_ERR
) {
1443 if (status
& RTK_ISR_LINKCHG
) {
1444 callout_stop(&sc
->rtk_tick_ch
);
1449 if (handled
&& !IFQ_IS_EMPTY(&ifp
->if_snd
))
1458 * Main transmit routine for C+ and gigE NICs.
1462 re_start(struct ifnet
*ifp
)
1464 struct rtk_softc
*sc
;
1470 uint32_t cmdstat
, re_flags
, vlanctl
;
1471 int ofree
, idx
, error
, nsegs
, seg
;
1472 int startdesc
, curdesc
, lastdesc
;
1476 ofree
= sc
->re_ldata
.re_txq_free
;
1478 for (idx
= sc
->re_ldata
.re_txq_prodidx
;; idx
= RE_NEXT_TXQ(sc
, idx
)) {
1480 IFQ_POLL(&ifp
->if_snd
, m
);
1484 if (sc
->re_ldata
.re_txq_free
== 0 ||
1485 sc
->re_ldata
.re_tx_free
== 0) {
1486 /* no more free slots left */
1487 ifp
->if_flags
|= IFF_OACTIVE
;
1492 * Set up checksum offload. Note: checksum offload bits must
1493 * appear in all descriptors of a multi-descriptor transmit
1494 * attempt. (This is according to testing done with an 8169
1495 * chip. I'm not sure if this is a requirement or a bug.)
1499 if ((m
->m_pkthdr
.csum_flags
& M_CSUM_TSOv4
) != 0) {
1500 uint32_t segsz
= m
->m_pkthdr
.segsz
;
1502 re_flags
= RE_TDESC_CMD_LGSEND
|
1503 (segsz
<< RE_TDESC_CMD_MSSVAL_SHIFT
);
1506 * set RE_TDESC_CMD_IPCSUM if any checksum offloading
1507 * is requested. otherwise, RE_TDESC_CMD_TCPCSUM/
1508 * RE_TDESC_CMD_UDPCSUM doesn't take effect.
1511 if ((m
->m_pkthdr
.csum_flags
&
1512 (M_CSUM_IPv4
| M_CSUM_TCPv4
| M_CSUM_UDPv4
))
1514 if ((sc
->sc_quirk
& RTKQ_DESCV2
) == 0) {
1515 re_flags
|= RE_TDESC_CMD_IPCSUM
;
1516 if (m
->m_pkthdr
.csum_flags
&
1519 RE_TDESC_CMD_TCPCSUM
;
1520 } else if (m
->m_pkthdr
.csum_flags
&
1523 RE_TDESC_CMD_UDPCSUM
;
1526 vlanctl
|= RE_TDESC_VLANCTL_IPCSUM
;
1527 if (m
->m_pkthdr
.csum_flags
&
1530 RE_TDESC_VLANCTL_TCPCSUM
;
1531 } else if (m
->m_pkthdr
.csum_flags
&
1534 RE_TDESC_VLANCTL_UDPCSUM
;
1540 txq
= &sc
->re_ldata
.re_txq
[idx
];
1541 map
= txq
->txq_dmamap
;
1542 error
= bus_dmamap_load_mbuf(sc
->sc_dmat
, map
, m
,
1543 BUS_DMA_WRITE
|BUS_DMA_NOWAIT
);
1545 if (__predict_false(error
)) {
1546 /* XXX try to defrag if EFBIG? */
1547 printf("%s: can't map mbuf (error %d)\n",
1548 device_xname(sc
->sc_dev
), error
);
1550 IFQ_DEQUEUE(&ifp
->if_snd
, m
);
1556 nsegs
= map
->dm_nsegs
;
1558 if (__predict_false(m
->m_pkthdr
.len
<= RE_IP4CSUMTX_PADLEN
&&
1559 (re_flags
& RE_TDESC_CMD_IPCSUM
) != 0 &&
1560 (sc
->sc_quirk
& RTKQ_DESCV2
) == 0)) {
1565 if (nsegs
> sc
->re_ldata
.re_tx_free
) {
1567 * Not enough free descriptors to transmit this packet.
1569 ifp
->if_flags
|= IFF_OACTIVE
;
1570 bus_dmamap_unload(sc
->sc_dmat
, map
);
1574 IFQ_DEQUEUE(&ifp
->if_snd
, m
);
1577 * Make sure that the caches are synchronized before we
1578 * ask the chip to start DMA for the packet data.
1580 bus_dmamap_sync(sc
->sc_dmat
, map
, 0, map
->dm_mapsize
,
1581 BUS_DMASYNC_PREWRITE
);
1584 * Set up hardware VLAN tagging. Note: vlan tag info must
1585 * appear in all descriptors of a multi-descriptor
1586 * transmission attempt.
1588 if ((mtag
= VLAN_OUTPUT_TAG(&sc
->ethercom
, m
)) != NULL
)
1589 vlanctl
|= bswap16(VLAN_TAG_VALUE(mtag
)) |
1590 RE_TDESC_VLANCTL_TAG
;
1593 * Map the segment array into descriptors.
1594 * Note that we set the start-of-frame and
1595 * end-of-frame markers for either TX or RX,
1596 * but they really only have meaning in the TX case.
1597 * (In the RX case, it's the chip that tells us
1598 * where packets begin and end.)
1599 * We also keep track of the end of the ring
1600 * and set the end-of-ring bits as needed,
1601 * and we set the ownership bits in all except
1602 * the very first descriptor. (The caller will
1603 * set this descriptor later when it starts
1604 * transmission or reception.)
1606 curdesc
= startdesc
= sc
->re_ldata
.re_tx_nextfree
;
1608 for (seg
= 0; seg
< map
->dm_nsegs
;
1609 seg
++, curdesc
= RE_NEXT_TX_DESC(sc
, curdesc
)) {
1610 d
= &sc
->re_ldata
.re_tx_list
[curdesc
];
1612 RE_TXDESCSYNC(sc
, curdesc
,
1613 BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
1614 cmdstat
= le32toh(d
->re_cmdstat
);
1615 RE_TXDESCSYNC(sc
, curdesc
, BUS_DMASYNC_PREREAD
);
1616 if (cmdstat
& RE_TDESC_STAT_OWN
) {
1617 panic("%s: tried to map busy TX descriptor",
1618 device_xname(sc
->sc_dev
));
1622 d
->re_vlanctl
= htole32(vlanctl
);
1623 re_set_bufaddr(d
, map
->dm_segs
[seg
].ds_addr
);
1624 cmdstat
= re_flags
| map
->dm_segs
[seg
].ds_len
;
1626 cmdstat
|= RE_TDESC_CMD_SOF
;
1628 cmdstat
|= RE_TDESC_CMD_OWN
;
1629 if (curdesc
== (RE_TX_DESC_CNT(sc
) - 1))
1630 cmdstat
|= RE_TDESC_CMD_EOR
;
1631 if (seg
== nsegs
- 1) {
1632 cmdstat
|= RE_TDESC_CMD_EOF
;
1635 d
->re_cmdstat
= htole32(cmdstat
);
1636 RE_TXDESCSYNC(sc
, curdesc
,
1637 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
1639 if (__predict_false(pad
)) {
1640 d
= &sc
->re_ldata
.re_tx_list
[curdesc
];
1641 d
->re_vlanctl
= htole32(vlanctl
);
1642 re_set_bufaddr(d
, RE_TXPADDADDR(sc
));
1643 cmdstat
= re_flags
|
1644 RE_TDESC_CMD_OWN
| RE_TDESC_CMD_EOF
|
1645 (RE_IP4CSUMTX_PADLEN
+ 1 - m
->m_pkthdr
.len
);
1646 if (curdesc
== (RE_TX_DESC_CNT(sc
) - 1))
1647 cmdstat
|= RE_TDESC_CMD_EOR
;
1648 d
->re_cmdstat
= htole32(cmdstat
);
1649 RE_TXDESCSYNC(sc
, curdesc
,
1650 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
1652 curdesc
= RE_NEXT_TX_DESC(sc
, curdesc
);
1654 KASSERT(lastdesc
!= -1);
1656 /* Transfer ownership of packet to the chip. */
1658 sc
->re_ldata
.re_tx_list
[startdesc
].re_cmdstat
|=
1659 htole32(RE_TDESC_CMD_OWN
);
1660 RE_TXDESCSYNC(sc
, startdesc
,
1661 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
1663 /* update info of TX queue and descriptors */
1665 txq
->txq_descidx
= lastdesc
;
1666 txq
->txq_nsegs
= nsegs
;
1668 sc
->re_ldata
.re_txq_free
--;
1669 sc
->re_ldata
.re_tx_free
-= nsegs
;
1670 sc
->re_ldata
.re_tx_nextfree
= curdesc
;
1674 * If there's a BPF listener, bounce a copy of this frame
1678 bpf_mtap(ifp
->if_bpf
, m
);
1682 if (sc
->re_ldata
.re_txq_free
< ofree
) {
1684 * TX packets are enqueued.
1686 sc
->re_ldata
.re_txq_prodidx
= idx
;
1689 * Start the transmitter to poll.
1691 * RealTek put the TX poll request register in a different
1692 * location on the 8169 gigE chip. I don't know why.
1694 if ((sc
->sc_quirk
& RTKQ_8139CPLUS
) != 0)
1695 CSR_WRITE_1(sc
, RTK_TXSTART
, RTK_TXSTART_START
);
1697 CSR_WRITE_1(sc
, RTK_GTXSTART
, RTK_TXSTART_START
);
1700 * Use the countdown timer for interrupt moderation.
1701 * 'TX done' interrupts are disabled. Instead, we reset the
1702 * countdown timer, which will begin counting until it hits
1703 * the value in the TIMERINT register, and then trigger an
1704 * interrupt. Each time we write to the TIMERCNT register,
1705 * the timer count is reset to 0.
1707 CSR_WRITE_4(sc
, RTK_TIMERCNT
, 1);
1710 * Set a timeout in case the chip goes out to lunch.
1717 re_init(struct ifnet
*ifp
)
1719 struct rtk_softc
*sc
= ifp
->if_softc
;
1720 const uint8_t *enaddr
;
1726 if ((error
= re_enable(sc
)) != 0)
1730 * Cancel pending I/O and free all RX/TX buffers.
1737 * Enable C+ RX and TX mode, as well as VLAN stripping and
1738 * RX checksum offload. We must configure the C+ register
1739 * before all others.
1741 cfg
= RE_CPLUSCMD_PCI_MRW
;
1744 * XXX: For old 8169 set bit 14.
1745 * For 8169S/8110S and above, do not set bit 14.
1747 if ((sc
->sc_quirk
& RTKQ_8169NONS
) != 0)
1750 if ((ifp
->if_capenable
& ETHERCAP_VLAN_HWTAGGING
) != 0)
1751 cfg
|= RE_CPLUSCMD_VLANSTRIP
;
1752 if ((ifp
->if_capenable
& (IFCAP_CSUM_IPv4_Rx
|
1753 IFCAP_CSUM_TCPv4_Rx
| IFCAP_CSUM_UDPv4_Rx
)) != 0)
1754 cfg
|= RE_CPLUSCMD_RXCSUM_ENB
;
1755 if ((sc
->sc_quirk
& RTKQ_MACSTAT
) != 0) {
1756 cfg
|= RE_CPLUSCMD_MACSTAT_DIS
;
1757 cfg
|= RE_CPLUSCMD_TXENB
;
1759 cfg
|= RE_CPLUSCMD_RXENB
| RE_CPLUSCMD_TXENB
;
1761 CSR_WRITE_2(sc
, RTK_CPLUS_CMD
, cfg
);
1763 /* XXX: from Realtek-supplied Linux driver. Wholly undocumented. */
1764 if ((sc
->sc_quirk
& RTKQ_8139CPLUS
) == 0)
1765 CSR_WRITE_2(sc
, RTK_IM
, 0x0000);
1770 * Init our MAC address. Even though the chipset
1771 * documentation doesn't mention it, we need to enter "Config
1772 * register write enable" mode to modify the ID registers.
1774 CSR_WRITE_1(sc
, RTK_EECMD
, RTK_EEMODE_WRITECFG
);
1775 enaddr
= CLLADDR(ifp
->if_sadl
);
1776 reg
= enaddr
[0] | (enaddr
[1] << 8) |
1777 (enaddr
[2] << 16) | (enaddr
[3] << 24);
1778 CSR_WRITE_4(sc
, RTK_IDR0
, reg
);
1779 reg
= enaddr
[4] | (enaddr
[5] << 8);
1780 CSR_WRITE_4(sc
, RTK_IDR4
, reg
);
1781 CSR_WRITE_1(sc
, RTK_EECMD
, RTK_EEMODE_OFF
);
1784 * For C+ mode, initialize the RX descriptors and mbufs.
1786 re_rx_list_init(sc
);
1787 re_tx_list_init(sc
);
1790 * Load the addresses of the RX and TX lists into the chip.
1792 CSR_WRITE_4(sc
, RTK_RXLIST_ADDR_HI
,
1793 RE_ADDR_HI(sc
->re_ldata
.re_rx_list_map
->dm_segs
[0].ds_addr
));
1794 CSR_WRITE_4(sc
, RTK_RXLIST_ADDR_LO
,
1795 RE_ADDR_LO(sc
->re_ldata
.re_rx_list_map
->dm_segs
[0].ds_addr
));
1797 CSR_WRITE_4(sc
, RTK_TXLIST_ADDR_HI
,
1798 RE_ADDR_HI(sc
->re_ldata
.re_tx_list_map
->dm_segs
[0].ds_addr
));
1799 CSR_WRITE_4(sc
, RTK_TXLIST_ADDR_LO
,
1800 RE_ADDR_LO(sc
->re_ldata
.re_tx_list_map
->dm_segs
[0].ds_addr
));
1803 * Enable transmit and receive.
1805 CSR_WRITE_1(sc
, RTK_COMMAND
, RTK_CMD_TX_ENB
| RTK_CMD_RX_ENB
);
1808 * Set the initial TX and RX configuration.
1810 if (sc
->re_testmode
&& (sc
->sc_quirk
& RTKQ_8169NONS
) != 0) {
1811 /* test mode is needed only for old 8169 */
1812 CSR_WRITE_4(sc
, RTK_TXCFG
,
1813 RE_TXCFG_CONFIG
| RTK_LOOPTEST_ON
);
1815 CSR_WRITE_4(sc
, RTK_TXCFG
, RE_TXCFG_CONFIG
);
1817 CSR_WRITE_1(sc
, RTK_EARLY_TX_THRESH
, 16);
1819 CSR_WRITE_4(sc
, RTK_RXCFG
, RE_RXCFG_CONFIG
);
1821 /* Set the individual bit to receive frames for this host only. */
1822 rxcfg
= CSR_READ_4(sc
, RTK_RXCFG
);
1823 rxcfg
|= RTK_RXCFG_RX_INDIV
;
1825 /* If we want promiscuous mode, set the allframes bit. */
1826 if (ifp
->if_flags
& IFF_PROMISC
)
1827 rxcfg
|= RTK_RXCFG_RX_ALLPHYS
;
1829 rxcfg
&= ~RTK_RXCFG_RX_ALLPHYS
;
1830 CSR_WRITE_4(sc
, RTK_RXCFG
, rxcfg
);
1833 * Set capture broadcast bit to capture broadcast frames.
1835 if (ifp
->if_flags
& IFF_BROADCAST
)
1836 rxcfg
|= RTK_RXCFG_RX_BROAD
;
1838 rxcfg
&= ~RTK_RXCFG_RX_BROAD
;
1839 CSR_WRITE_4(sc
, RTK_RXCFG
, rxcfg
);
1842 * Program the multicast filter, if necessary.
1847 * Enable interrupts.
1849 if (sc
->re_testmode
)
1850 CSR_WRITE_2(sc
, RTK_IMR
, 0);
1852 CSR_WRITE_2(sc
, RTK_IMR
, RTK_INTRS_CPLUS
);
1854 /* Start RX/TX process. */
1855 CSR_WRITE_4(sc
, RTK_MISSEDPKT
, 0);
1857 /* Enable receiver and transmitter. */
1858 CSR_WRITE_1(sc
, RTK_COMMAND
, RTK_CMD_TX_ENB
| RTK_CMD_RX_ENB
);
1862 * Initialize the timer interrupt register so that
1863 * a timer interrupt will be generated once the timer
1864 * reaches a certain number of ticks. The timer is
1865 * reloaded on each transmit. This gives us TX interrupt
1866 * moderation, which dramatically improves TX frame rate.
1869 if ((sc
->sc_quirk
& RTKQ_8139CPLUS
) != 0)
1870 CSR_WRITE_4(sc
, RTK_TIMERINT
, 0x400);
1872 CSR_WRITE_4(sc
, RTK_TIMERINT_8169
, 0x800);
1875 * For 8169 gigE NICs, set the max allowed RX packet
1876 * size so we can receive jumbo frames.
1878 CSR_WRITE_2(sc
, RTK_MAXRXPKTLEN
, 16383);
1881 if (sc
->re_testmode
)
1884 CSR_WRITE_1(sc
, RTK_CFG1
, RTK_CFG1_DRVLOAD
);
1886 ifp
->if_flags
|= IFF_RUNNING
;
1887 ifp
->if_flags
&= ~IFF_OACTIVE
;
1889 callout_reset(&sc
->rtk_tick_ch
, hz
, re_tick
, sc
);
1893 ifp
->if_flags
&= ~(IFF_RUNNING
| IFF_OACTIVE
);
1895 printf("%s: interface not running\n",
1896 device_xname(sc
->sc_dev
));
1903 re_ioctl(struct ifnet
*ifp
, u_long command
, void *data
)
1905 struct rtk_softc
*sc
= ifp
->if_softc
;
1906 struct ifreq
*ifr
= data
;
1914 * Disable jumbo frames if it's not supported.
1916 if ((sc
->sc_quirk
& RTKQ_NOJUMBO
) != 0 &&
1917 ifr
->ifr_mtu
> ETHERMTU
) {
1922 if (ifr
->ifr_mtu
< ETHERMIN
|| ifr
->ifr_mtu
> ETHERMTU_JUMBO
)
1924 else if ((error
= ifioctl_common(ifp
, command
, data
)) ==
1929 if ((error
= ether_ioctl(ifp
, command
, data
)) != ENETRESET
)
1934 if (command
== SIOCSIFCAP
)
1935 error
= (*ifp
->if_init
)(ifp
);
1936 else if (command
!= SIOCADDMULTI
&& command
!= SIOCDELMULTI
)
1938 else if (ifp
->if_flags
& IFF_RUNNING
)
1949 re_watchdog(struct ifnet
*ifp
)
1951 struct rtk_softc
*sc
;
1956 printf("%s: watchdog timeout\n", device_xname(sc
->sc_dev
));
1968 * Stop the adapter and free any mbufs allocated to the
1972 re_stop(struct ifnet
*ifp
, int disable
)
1975 struct rtk_softc
*sc
= ifp
->if_softc
;
1977 callout_stop(&sc
->rtk_tick_ch
);
1981 if ((sc
->sc_quirk
& RTKQ_CMDSTOP
) != 0)
1982 CSR_WRITE_1(sc
, RTK_COMMAND
, RTK_CMD_STOPREQ
| RTK_CMD_TX_ENB
|
1985 CSR_WRITE_1(sc
, RTK_COMMAND
, 0x00);
1987 CSR_WRITE_2(sc
, RTK_IMR
, 0x0000);
1988 CSR_WRITE_2(sc
, RTK_ISR
, 0xFFFF);
1990 if (sc
->re_head
!= NULL
) {
1991 m_freem(sc
->re_head
);
1992 sc
->re_head
= sc
->re_tail
= NULL
;
1995 /* Free the TX list buffers. */
1996 for (i
= 0; i
< RE_TX_QLEN
; i
++) {
1997 if (sc
->re_ldata
.re_txq
[i
].txq_mbuf
!= NULL
) {
1998 bus_dmamap_unload(sc
->sc_dmat
,
1999 sc
->re_ldata
.re_txq
[i
].txq_dmamap
);
2000 m_freem(sc
->re_ldata
.re_txq
[i
].txq_mbuf
);
2001 sc
->re_ldata
.re_txq
[i
].txq_mbuf
= NULL
;
2005 /* Free the RX list buffers. */
2006 for (i
= 0; i
< RE_RX_DESC_CNT
; i
++) {
2007 if (sc
->re_ldata
.re_rxsoft
[i
].rxs_mbuf
!= NULL
) {
2008 bus_dmamap_unload(sc
->sc_dmat
,
2009 sc
->re_ldata
.re_rxsoft
[i
].rxs_dmamap
);
2010 m_freem(sc
->re_ldata
.re_rxsoft
[i
].rxs_mbuf
);
2011 sc
->re_ldata
.re_rxsoft
[i
].rxs_mbuf
= NULL
;
2018 ifp
->if_flags
&= ~(IFF_RUNNING
| IFF_OACTIVE
);