/*	$NetBSD: if_vr.c,v 1.100 2009/11/26 15:17:10 njoly Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */
/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism also has the same flaw.  This
 * means that on systems with struct alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.
 */
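/*
 * (Worked example of that 2-byte shift: with the frame copied in at
 * offset 2, the 14-byte Ethernet header occupies bytes 2-15, so the
 * IP header that follows begins at byte 16, a 4-byte boundary.  See
 * the m->m_data += 2 in vr_rxeof() below.)
 */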
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vr.c,v 1.100 2009/11/26 15:17:10 njoly Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include "rnd.h"
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

#define	VR_USEIOSPACE
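/*
 * With VR_USEIOSPACE defined, vr_attach() below prefers the I/O-space
 * BAR when both the I/O and memory mappings succeed; undefining it
 * flips the preference to memory space.
 */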
/*
 * Various supported device vendors/types and their names.
 */
static const struct vr_type {
	pci_vendor_id_t		vr_vid;		/* vendor ID */
	pci_product_id_t	vr_did;		/* product ID */
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH,	PCI_PRODUCT_VIATECH_VT3043 },
	{ PCI_VENDOR_VIATECH,	PCI_PRODUCT_VIATECH_VT6102 },
	{ PCI_VENDOR_VIATECH,	PCI_PRODUCT_VIATECH_VT6105 },
	{ PCI_VENDOR_VIATECH,	PCI_PRODUCT_VIATECH_VT6105M },
	{ PCI_VENDOR_VIATECH,	PCI_PRODUCT_VIATECH_VT86C100A },
};
/*
 * Transmit descriptor list size.
 */
#define	VR_NTXDESC		64
#define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define	VR_NRXDESC		64
#define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)
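/*
 * Because both ring sizes are powers of two, advancing a ring index
 * is a simple mask rather than a modulo; e.g. VR_NEXTTX(63) wraps
 * back to 0.
 */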
/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];
	struct vr_desc		vr_rxdescs[VR_NRXDESC];
};

#define	VR_CDOFF(x)	offsetof(struct vr_control_data, x)
#define	VR_CDTXOFF(x)	VR_CDOFF(vr_txdescs[(x)])
#define	VR_CDRXOFF(x)	VR_CDOFF(vr_rxdescs[(x)])
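/*
 * For example, VR_CDTXOFF(5) is the byte offset of the sixth transmit
 * descriptor within the clump, and every receive descriptor lies at
 * VR_CDRXOFF(0) or beyond, after all VR_NTXDESC transmit descriptors.
 */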
/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
	bus_dmamap_t		ds_dmamap;	/* our DMA map */
};

struct vr_softc {
	device_t		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	pcitag_t		vr_tag;		/* PCI tag */
	struct ethercom		vr_ec;		/* Ethernet common info */
	uint8_t			vr_enaddr[ETHER_ADDR_LEN];
	struct mii_data		vr_mii;		/* MII/media info */

	pcireg_t		vr_id;		/* vendor/product ID */
	uint8_t			vr_revid;	/* Rhine chip revision */

	callout_t		vr_tick_ch;	/* tick callout */

	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
#define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data	*vr_control_data;

	int	vr_txpending;		/* number of TX requests pending */
	int	vr_txdirty;		/* first dirty TX descriptor */
	int	vr_txlast;		/* last used TX descriptor */

	int	vr_rxptr;		/* next ready RX descriptor */

	uint32_t	vr_save_iobase;
	uint32_t	vr_save_membase;
	uint32_t	vr_save_irq;

#if NRND > 0
	rndsource_element_t	rnd_source;	/* random source */
#endif
};
#define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

#define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

#define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

#define	VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define	VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))
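/*
 * Note the offset/length pair above syncs just the one descriptor
 * being handed to or taken back from the chip, not the whole clump,
 * which keeps cache and bounce-buffer traffic to a minimum.
 */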
/*
 * Note we rely on MCLBYTES being a power of two below.
 */
#define	VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/* CONSTCOND */ 0)
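/*
 * (MCLBYTES - 1) is where the power-of-two assumption matters: with
 * MCLBYTES at 2048 it programs a buffer length of 2047, the largest
 * value the chip's 11-bit length field can hold; see the comment
 * above vr_add_rxbuf() below.
 */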
/*
 * register space access macros
 */
#define	CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define	CSR_READ_4(sc, reg)	\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_2(sc, reg)	\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_1(sc, reg)	\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

#define	VR_TIMEOUT		1000
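/*
 * VR_TIMEOUT bounds the register-polling loops below (chip reset,
 * RX/TX shutdown); each iteration polls once, with a short DELAY()
 * between polls, before the driver gives up and complains.
 */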
static int	vr_add_rxbuf(struct vr_softc *, int);

static void	vr_rxeof(struct vr_softc *);
static void	vr_rxeoc(struct vr_softc *);
static void	vr_txeof(struct vr_softc *);
static int	vr_intr(void *);
static void	vr_start(struct ifnet *);
static int	vr_ioctl(struct ifnet *, u_long, void *);
static int	vr_init(struct ifnet *);
static void	vr_stop(struct ifnet *, int);
static void	vr_rxdrain(struct vr_softc *);
static void	vr_watchdog(struct ifnet *);
static void	vr_tick(void *);

static int	vr_mii_readreg(device_t, int, int);
static void	vr_mii_writereg(device_t, int, int, int);
static void	vr_mii_statchg(device_t);

static void	vr_setmulti(struct vr_softc *);
static void	vr_reset(struct vr_softc *);
static int	vr_restore_state(pci_chipset_tag_t, pcitag_t, device_t,
		    pcireg_t);
static bool	vr_resume(device_t, pmf_qual_t);

int	vr_copy_small = 0;
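/*
 * vr_copy_small is a tunable used on machines without strict
 * alignment requirements: when nonzero, a received frame that fits
 * in a single header mbuf is copied out of its cluster, which saves
 * cluster memory under small-packet loads; see vr_rxeof() below.
 */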
#define	VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) | (x))

#define	VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) & ~(x))

#define	VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) | (x))

#define	VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) & ~(x))

#define	VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) | (x))

#define	VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) & ~(x))
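/*
 * Note these helpers are read-modify-write sequences, not atomic
 * operations; the driver relies on running at splnet or within the
 * interrupt handler when it touches the command and configuration
 * registers.
 */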
/*
 * MII bit-bang glue.
 */
static uint32_t	vr_mii_bitbang_read(device_t);
static void	vr_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops vr_mii_bitbang_ops = {
	vr_mii_bitbang_read,
	vr_mii_bitbang_write,
	{
		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
		VR_MIICMD_CLK,		/* MII_BIT_MDC */
		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

static uint32_t
vr_mii_bitbang_read(device_t self)
{
	struct vr_softc *sc = device_private(self);

	return (CSR_READ_1(sc, VR_MIICMD));
}

static void
vr_mii_bitbang_write(device_t self, uint32_t val)
{
	struct vr_softc *sc = device_private(self);

	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
}
/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(device_t self, int phy, int reg)
{
	struct vr_softc *sc = device_private(self);

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
}

/*
 * Write to a PHY register through the MII.
 */
static void
vr_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct vr_softc *sc = device_private(self);

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
}

static void
vr_mii_statchg(device_t self)
{
	struct vr_softc *sc = device_private(self);

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
}
#define	vr_calchash(addr) \
	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
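/*
 * The shift keeps only the top 6 bits of the big-endian CRC32, an
 * index in [0, 63]: vr_setmulti() below sets bit h of VR_MAR0 for
 * h < 32, otherwise bit (h - 32) of VR_MAR1.
 */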
/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int h = 0;
	uint32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	uint8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}
static void
vr_reset(struct vr_softc *sc)
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A) {
			printf("%s: reset never completed!\n",
			    device_xname(sc->vr_dev));
		} else {
			/* Use newer force reset command */
			printf("%s: using force reset command.\n",
			    device_xname(sc->vr_dev));
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_add_rxbuf(struct vr_softc *sc, int i)
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->vr_dev,
		    "unable to load rx DMA map %d, error = %d\n", i, error);
		panic("vr_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	VR_INIT_RXDESC(sc, i);

	return (0);
}
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	uint32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n",
			    device_xname(sc->vr_dev), errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		} else if (!(rxstat & VR_RXSTAT_FIRSTFRAG) ||
			   !(rxstat & VR_RXSTAT_LASTFRAG)) {
			/*
			 * This driver expects to receive whole packets every
			 * time.  In case we receive a fragment that is not
			 * a complete packet, we discard it.
			 */
			ifp->if_ierrors++;

			printf("%s: receive error: incomplete frame; "
			    "size = %d, status = 0x%x\n",
			    device_xname(sc->vr_dev),
			    VR_RXBYTES(le32toh(d->vr_status)), rxstat);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(le32toh(d->vr_status));

		if (total_len == 0) {
			/*
			 * If we receive a zero-length packet, we probably
			 * missed handling an error condition above.
			 * Discard it to avoid a later crash.
			 */
			ifp->if_ierrors++;

			printf("%s: receive error: zero-length packet; "
			    "status = 0x%x\n",
			    device_xname(sc->vr_dev), rxstat);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		/*
		 * The Rhine chip includes the CRC with every packet;
		 * trim it off here.
		 */
		total_len -= ETHER_CRC_LEN;

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (vr_copy_small != 0 && total_len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, void *),
			    mtod(ds->ds_mbuf, void *), total_len);
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
 dropit:
				ifp->if_ierrors++;
				VR_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->vr_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		/*
		 * Handle BPF listeners.  Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}
static void
vr_rxeoc(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->vr_ec.ec_if;

	ifp->if_ierrors++;

	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON) == 0)
			break;
	}
	if (i == VR_TIMEOUT) {
		/* XXX need reset? */
		printf("%s: RX shutdown never complete\n",
		    device_xname(sc->vr_dev));
	}

	vr_rxeof(sc);

	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}
/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	uint32_t txstat;
	int i, j;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	    i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = le32toh(d->vr_status);

		if (txstat & (VR_TXSTAT_ABRT | VR_TXSTAT_UDF)) {
			VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
			for (j = 0; j < VR_TIMEOUT; j++) {
				DELAY(10);
				if ((CSR_READ_2(sc, VR_COMMAND) &
				    VR_CMD_TX_ON) == 0)
					break;
			}
			if (j == VR_TIMEOUT) {
				/* XXX need reset? */
				printf("%s: TX shutdown never complete\n",
				    device_xname(sc->vr_dev));
			}
			d->vr_status = htole32(VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, i));
			break;
		}

		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}
static int
vr_intr(void *arg)
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(ifp, 1);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

#if NRND > 0
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, status);
#endif

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
			printf("%s: rx packet lost\n",
			    device_xname(sc->vr_dev));
			ifp->if_ierrors++;
		}

		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW))
			vr_rxeoc(sc);

		if (status & (VR_ISR_BUSERR | VR_ISR_TX_UNDERRUN)) {
			if (status & VR_ISR_BUSERR)
				printf("%s: PCI bus error\n",
				    device_xname(sc->vr_dev));
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    device_xname(sc->vr_dev));
			/* vr_init() calls vr_start() */
			dotx = 0;
			(void)vr_init(ifp);
		}

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status &
		    (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2 | VR_ISR_TX_UDFI)) {
			if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
				printf("%s: transmit aborted\n",
				    device_xname(sc->vr_dev));
			if (status & VR_ISR_TX_UDFI)
				printf("%s: transmit underflow\n",
				    device_xname(sc->vr_dev));
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (dotx)
		vr_start(ifp);

	return (handled);
}
/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists.  We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
vr_start(struct ifnet *ifp)
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note,
		 * the packet must also be aligned.
		 * if the packet is too small, copy it too, so we're sure
		 * we have enough room for the pad buffer.
		 */
		if ((mtod(m0, uintptr_t) & 3) != 0 ||
		    m0->m_pkthdr.len < VR_MIN_FRAMELEN ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->vr_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->vr_dev));
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			/*
			 * The Rhine doesn't auto-pad, so we have to do this
			 * ourselves.
			 */
			if (m0->m_pkthdr.len < VR_MIN_FRAMELEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len,
				    0, VR_MIN_FRAMELEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = VR_MIN_FRAMELEN;
			}

			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				m_freem(m);
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n",
				    device_xname(sc->vr_dev), error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.
		 */
		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
		d->vr_ctl = htole32(m0->m_pkthdr.len);
		d->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG | VR_TXCTL_LASTFRAG);

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = htole32(VR_TXSTAT_OWN);

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
/*
 * Initialize the interface.  Must be called at splnet.
 */
static int
vr_init(struct ifnet *ifp)
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, error = 0;

	/* Cancel pending I/O. */
	vr_stop(ifp, 0);

	/* Reset the Rhine to a known state. */
	vr_reset(sc);

	/* set DMA length in BCR0 and BCR1 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTH_128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTH_STORENFWD);

	/* set DMA threshold length in RXCFG and TXCFG */
	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		d = VR_CDTX(sc, i);
		memset(d, 0, sizeof(struct vr_desc));
		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->vr_txpending = 0;
	sc->vr_txdirty = 0;
	sc->vr_txlast = VR_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = vr_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->vr_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				vr_rxdrain(sc);
				goto out;
			}
		} else
			VR_INIT_RXDESC(sc, i);
	}
	sc->vr_rxptr = 0;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/* Program the multicast filter, if necessary. */
	vr_setmulti(sc);

	/* Give the transmit and receive rings to the Rhine. */
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));

	/* Set current media. */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|VR_CMD_RX_GO);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start one second timer. */
	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);

	/* Attempt to start output on the interface. */
	vr_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n",
		    device_xname(sc->vr_dev));
	return (error);
}
static int
vr_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct vr_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, command, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			vr_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
static void
vr_watchdog(struct ifnet *ifp)
{
	struct vr_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->vr_dev));
	ifp->if_oerrors++;

	(void) vr_init(ifp);
}
/*
 * One second timer, used to tick MII.
 */
static void
vr_tick(void *arg)
{
	struct vr_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->vr_mii);
	splx(s);

	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
}
/*
 * Drain the receive queue.
 */
static void
vr_rxdrain(struct vr_softc *sc)
{
	struct vr_descsoft *ds;
	int i;

	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(struct ifnet *ifp, int disable)
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_descsoft *ds;
	int i;

	/* Cancel one second timer. */
	callout_stop(&sc->vr_tick_ch);

	/* Down the MII. */
	mii_down(&sc->vr_mii);

	ifp = &sc->vr_ec.ec_if;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		ds = VR_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		vr_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
static int	vr_probe(device_t, cfdata_t, void *);
static void	vr_attach(device_t, device_t, void *);
static bool	vr_shutdown(device_t, int);

CFATTACH_DECL_NEW(vr, sizeof (struct vr_softc),
    vr_probe, vr_attach, NULL, NULL);
static const struct vr_type *
vr_lookup(struct pci_attach_args *pa)
{
	const struct vr_type *vrt;
	int i;

	for (i = 0; i < __arraycount(vr_devs); i++) {
		vrt = &vr_devs[i];
		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
			return (vrt);
	}
	return (NULL);
}
static int
vr_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (vr_lookup(pa) != NULL)
		return (1);

	return (0);
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static bool
vr_shutdown(device_t self, int howto)
{
	struct vr_softc *sc = device_private(self);

	vr_stop(&sc->vr_ec.ec_if, 1);

	return true;
}
/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vr_attach(device_t parent, device_t self, void *aux)
{
	struct vr_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
	bus_dma_segment_t seg;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN], mac;
	int i, rseg, error;
	char devinfo[256];
	pcireg_t reg;

#define	PCI_CONF_WRITE(r, v)	pci_conf_write(sc->vr_pc, sc->vr_tag, (r), (v))
#define	PCI_CONF_READ(r)	pci_conf_read(sc->vr_pc, sc->vr_tag, (r))

	sc->vr_dev = self;
	sc->vr_pc = pa->pa_pc;
	sc->vr_tag = pa->pa_tag;
	sc->vr_id = pa->pa_id;
	callout_init(&sc->vr_tick_ch, 0);

	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));

	aprint_normal(": %s (rev. 0x%02x)\n", devinfo,
	    PCI_REVISION(pa->pa_class));

	/*
	 * Handle power management nonsense.
	 */
	sc->vr_save_iobase = PCI_CONF_READ(VR_PCI_LOIO);
	sc->vr_save_membase = PCI_CONF_READ(VR_PCI_LOMEM);
	sc->vr_save_irq = PCI_CONF_READ(PCI_INTERRUPT_REG);

	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    vr_restore_state)) && error != EOPNOTSUPP) {
		aprint_error_dev(self, "cannot activate %d\n", error);
		return;
	}

	/* Make sure bus mastering is enabled. */
	reg = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, reg);

	/* Get the chip revision. */
	sc->vr_revid = PCI_REVISION(pa->pa_class);

	/*
	 * Map control/status registers.
	 */
	{
		bus_space_tag_t iot, memt;
		bus_space_handle_t ioh, memh;
		int ioh_valid, memh_valid;
		pci_intr_handle_t intrhandle;
		const char *intrstr;

		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
		    PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL) == 0);
		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
		    PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &memt, &memh, NULL, NULL) == 0);
#if defined(VR_USEIOSPACE)
		if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		} else if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		}
#else
		if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		} else if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		}
#endif
		else {
			printf(": unable to map device registers\n");
			return;
		}

		/* Allocate interrupt */
		if (pci_intr_map(pa, &intrhandle)) {
			aprint_error_dev(self, "couldn't map interrupt\n");
			return;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
		    vr_intr, sc);
		if (sc->vr_ih == NULL) {
			aprint_error_dev(self, "couldn't establish interrupt");
			if (intrstr != NULL)
				aprint_error(" at %s", intrstr);
			aprint_error("\n");
			return;
		}
		aprint_normal_dev(self, "interrupting at %s\n", intrstr);
	}

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down.  Be sure to kick it in the head to wake it
	 * up again.
	 *
	 * Don't touch this register on VT3043 since it causes
	 * kernel MCHK trap on macppc.
	 * (Note some VT86C100A chip returns a product ID of VT3043)
	 */
	if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT3043)
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 *
	 * XXXSCW: On the Rhine III, setting VR_EECSR_LOAD forces a reload
	 * of the *whole* EEPROM, not just the MAC address.  This is
	 * pretty pointless since the chip does this automatically
	 * at power-up/reset.
	 * I suspect the same thing applies to the other Rhine
	 * variants, but in the absence of a data sheet for those
	 * (and the lack of anyone else noticing the problems this
	 * causes) I'm going to retain the old behaviour for the
	 * moment.
	 *
	 * In some cases, the chip really does startup without having
	 * read the EEPROM (kern/34812).  To handle this case, we force
	 * a reload if we see an all-zeroes MAC address.
	 */
	for (mac = 0, i = 0; i < ETHER_ADDR_LEN; i++)
		mac |= (eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i));

	if (mac == 0 || (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6105 &&
	    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6102)) {
		VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
		DELAY(200);
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
	}

	/*
	 * A Rhine chip was detected.  Inform the world.
	 */
	printf("%s: Ethernet address: %s\n",
	    device_xname(self), ether_sprintf(eaddr));

	memcpy(sc->vr_enaddr, eaddr, ETHER_ADDR_LEN);

	sc->vr_dmat = pa->pa_dmat;

	/*
	 * Allocate the control data structures, and create and load
	 * the DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->vr_dmat,
	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error_dev(self,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
	    sizeof(struct vr_control_data), (void **)&sc->vr_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->vr_dmat,
	    sizeof(struct vr_control_data), 1,
	    sizeof(struct vr_control_data), 0, 0,
	    &sc->vr_cddmamap)) != 0) {
		aprint_error_dev(self, "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(self,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0,
		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
			aprint_error_dev(self, "unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
			aprint_error_dev(self, "unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
		VR_DSRX(sc, i)->ds_mbuf = NULL;
	}

	ifp = &sc->vr_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	ifp->if_stop = vr_stop;
	IFQ_SET_READY(&ifp->if_snd);

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	/*
	 * Initialize MII/media info.
	 */
	sc->vr_mii.mii_ifp = ifp;
	sc->vr_mii.mii_readreg = vr_mii_readreg;
	sc->vr_mii.mii_writereg = vr_mii_writereg;
	sc->vr_mii.mii_statchg = vr_mii_statchg;

	sc->vr_ec.ec_mii = &sc->vr_mii;
	ifmedia_init(&sc->vr_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_FORCEANEG);
	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->vr_enaddr);

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
#endif

	if (pmf_device_register1(self, NULL, vr_resume, vr_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

 fail_5:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < VR_NTXDESC; i++) {
		if (sc->vr_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->vr_dmat, (void *)sc->vr_control_data,
	    sizeof(struct vr_control_data));
 fail_1:
	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
 fail_0:
	return;
}
static int
vr_restore_state(pci_chipset_tag_t pc, pcitag_t tag, device_t self,
    pcireg_t state)
{
	struct vr_softc *sc = device_private(self);
	int error;

	if (state == PCI_PMCSR_STATE_D0)
		return 0;
	if ((error = pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0)))
		return error;

	/* Restore PCI config data. */
	PCI_CONF_WRITE(VR_PCI_LOIO, sc->vr_save_iobase);
	PCI_CONF_WRITE(VR_PCI_LOMEM, sc->vr_save_membase);
	PCI_CONF_WRITE(PCI_INTERRUPT_REG, sc->vr_save_irq);
	return 0;
}
static bool
vr_resume(device_t self, pmf_qual_t qual)
{
	struct vr_softc *sc = device_private(self);

	if (PCI_PRODUCT(sc->vr_id) != PCI_PRODUCT_VIATECH_VT3043)
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	return true;
}