/*	$NetBSD: if_sq.c,v 1.33 2007/03/04 06:00:39 christos Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.33 2007/03/04 06:00:39 christos Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/sysconf.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>
/*
 * Todo:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec or read-write (ie, is the 'write a one to clear it')
 *	    the correct thing?
 */
#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x) if (sq_debug) printf x
#else
#define SQ_DPRINTF(x)
#endif
static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, void *);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_txring_hpc1(struct sq_softc *);
static void	sq_txring_hpc3(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(paddr_t addr, psize_t len);
static void	sq_trace_dump(struct sq_softc *);

static void	enaddr_aton(const char *, u_int8_t *);
CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);
#define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
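/*
 * Frames shorter than ETHER_MIN_LEN must be padded out by the driver
 * before transmission.  The pad target excludes ETHER_CRC_LEN because
 * the chip appends the 4-byte FCS itself, so only 60 bytes of frame
 * data (64 minus 4) need to be supplied here.
 */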
#define sq_seeq_read(sc, off) \
	bus_space_read_1(sc->sc_regt, sc->sc_regh, off)
#define sq_seeq_write(sc, off, val) \
	bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val)

#define sq_hpc_read(sc, off) \
	bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
#define sq_hpc_write(sc, off, val) \
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)
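/*
 * The Seeq 8003's registers are byte-wide, while the HPC DMA engine is
 * programmed through 32-bit registers -- hence the _1 accessors for the
 * former and the _4 accessors for the latter.
 */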
/* MAC address offset for non-onboard implementations */
#define SQ_HPC_EEPROM_ENADDR	250

#define SGI_OUI_0		0x08
#define SGI_OUI_1		0x00
#define SGI_OUI_2		0x69
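/*
 * 08:00:69 is SGI's OUI.  sq_attach() uses it to decide whether the
 * station address read from the HPC EEPROM is plausible; if not, the
 * address is fetched from the ARCBIOS 'eaddr' environment variable
 * instead.
 */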
static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0) {
		vaddr_t reset, txstat;

		reset = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_dmaoff + ha->hpc_regs->enetr_reset);
		txstat = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_devoff + (SEEQ_TXSTAT << 2));

		if (platform.badaddr((void *)reset, sizeof(reset)))
			return (0);

		*(volatile uint32_t *)reset = 0x1;
		delay(20);
		*(volatile uint32_t *)reset = 0x0;

		if (platform.badaddr((void *)txstat, sizeof(txstat)))
			return (0);

		if ((*(volatile uint32_t *)txstat & 0xff) == TXSTAT_OLDNEW)
			return (1);
	}

	return (0);
}
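/*
 * Note that sq_match() is deliberately conservative: platform.badaddr()
 * rules out bus errors at the candidate addresses, the 0x1/0x0 pair
 * pulses the HPC reset register, and a freshly reset Seeq is then
 * expected to report TXSTAT_OLDNEW in its transmit status register
 * before we claim the device.
 */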
static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	const char *macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;		/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
	    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control),
	    (void **)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
	    1, sizeof(struct sq_control), PAGE_SIZE,
	    BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
	    sizeof(struct sq_control),
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));
	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, err);
			goto fail_6;
		}
	}
	memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
	    ETHER_ADDR_LEN);

	/*
	 * If our mac address is bogus, obtain it from ARCBIOS. This will
	 * be true of the onboard HPC3 on IP22, since there is no eeprom,
	 * but rather the DS1386 RTC's battery-backed ram is used.
	 */
	if (sc->sc_enaddr[0] != SGI_OUI_0 || sc->sc_enaddr[1] != SGI_OUI_1 ||
	    sc->sc_enaddr[2] != SGI_OUI_2) {
		macaddr = ARCBIOS->GetEnvironmentVariable("eaddr");
		if (macaddr == NULL) {
			printf(": unable to get MAC address!\n");
			goto fail_6;
		}
		enaddr_aton(macaddr, sc->sc_enaddr);
	}
	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}
	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
 fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control,
	    sizeof(struct sq_control));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
 fail_0:
	return;
}
/* Set up data to get the interface up and running. */
static int
sq_init(struct ifnet *ifp)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	/* Reset the chip to a known state. */
	sq_reset(sc);

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;
	sc->sc_nextrx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW |
				      TXCMD_IE_COLL |
				      TXCMD_IE_16COLL |
				      TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/*
	 * Set up HPC ethernet PIO and DMA configurations.
	 *
	 * The PROM appears to do most of this for the onboard HPC3, but
	 * not for the Challenge S's IOPLUS chip. We copy how the onboard
	 * chip is configured and assume that it's correct for both.
	 */
	if (sc->hpc_regs->revision == 3) {
		u_int32_t dmareg, pioreg;

		pioreg = HPC3_ENETR_PIOCFG_P1(1) |
			 HPC3_ENETR_PIOCFG_P2(6) |
			 HPC3_ENETR_PIOCFG_P3(1);

		dmareg = HPC3_ENETR_DMACFG_D1(6) |
			 HPC3_ENETR_DMACFG_D2(2) |
			 HPC3_ENETR_DMACFG_D3(0) |
			 HPC3_ENETR_DMACFG_FIX_RXDC |
			 HPC3_ENETR_DMACFG_FIX_INTR |
			 HPC3_ENETR_DMACFG_FIX_EOP |
			 HPC3_ENETR_DMACFG_TIMEOUT;

		sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg);
		sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}
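	/*
	 * RXCMD_REC_ALL presumably subsumes the more selective match
	 * modes used below, so nothing is masked out of sc_rxcmd first;
	 * promiscuous mode also doubles as all-multicast, which is why
	 * IFF_ALLMULTI is reported back to the upper layers.
	 */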
	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}
static int
sq_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}
static void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;
	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example). We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				m_freem(m);
				break;
			}
		}
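		/*
		 * At this point any runt has been copied and zero-padded:
		 * a 42-byte ARP request, for instance, becomes 60 bytes of
		 * frame data (ETHER_PAD_LEN), and the chip's own 4-byte FCS
		 * then brings it up to the 64-byte Ethernet minimum on the
		 * wire.
		 */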
		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
660 for (nexttx
= sc
->sc_nexttx
, seg
= 0, totlen
= 0;
661 seg
< dmamap
->dm_nsegs
;
662 seg
++, nexttx
= SQ_NEXTTX(nexttx
)) {
663 if (sc
->hpc_regs
->revision
== 3) {
664 sc
->sc_txdesc
[nexttx
].hpc3_hdd_bufptr
=
665 dmamap
->dm_segs
[seg
].ds_addr
;
666 sc
->sc_txdesc
[nexttx
].hpc3_hdd_ctl
=
667 dmamap
->dm_segs
[seg
].ds_len
;
669 sc
->sc_txdesc
[nexttx
].hpc1_hdd_bufptr
=
670 dmamap
->dm_segs
[seg
].ds_addr
;
671 sc
->sc_txdesc
[nexttx
].hpc1_hdd_ctl
=
672 dmamap
->dm_segs
[seg
].ds_len
;
674 sc
->sc_txdesc
[nexttx
].hdd_descptr
=
675 SQ_CDTXADDR(sc
, SQ_NEXTTX(nexttx
));
677 totlen
+= dmamap
->dm_segs
[seg
].ds_len
;
680 /* Last descriptor gets end-of-packet */
681 KASSERT(lasttx
!= -1);
682 if (sc
->hpc_regs
->revision
== 3)
683 sc
->sc_txdesc
[lasttx
].hpc3_hdd_ctl
|=
684 HPC3_HDD_CTL_EOPACKET
;
686 sc
->sc_txdesc
[lasttx
].hpc1_hdd_ctl
|=
687 HPC1_HDD_CTL_EOPACKET
;
689 SQ_DPRINTF(("%s: transmit %d-%d, len %d\n", sc
->sc_dev
.dv_xname
,
690 sc
->sc_nexttx
, lasttx
,
693 if (ifp
->if_flags
& IFF_DEBUG
) {
694 printf(" transmit chain:\n");
695 for (seg
= sc
->sc_nexttx
;; seg
= SQ_NEXTTX(seg
)) {
696 printf(" descriptor %d:\n", seg
);
697 printf(" hdd_bufptr: 0x%08x\n",
698 (sc
->hpc_regs
->revision
== 3) ?
699 sc
->sc_txdesc
[seg
].hpc3_hdd_bufptr
:
700 sc
->sc_txdesc
[seg
].hpc1_hdd_bufptr
);
701 printf(" hdd_ctl: 0x%08x\n",
702 (sc
->hpc_regs
->revision
== 3) ?
703 sc
->sc_txdesc
[seg
].hpc3_hdd_ctl
:
704 sc
->sc_txdesc
[seg
].hpc1_hdd_ctl
);
705 printf(" hdd_descptr: 0x%08x\n",
706 sc
->sc_txdesc
[seg
].hdd_descptr
);
713 /* Sync the descriptors we're using. */
714 SQ_CDTXSYNC(sc
, sc
->sc_nexttx
, dmamap
->dm_nsegs
,
715 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
717 /* Store a pointer to the packet so we can free it later */
718 sc
->sc_txmbuf
[sc
->sc_nexttx
] = m0
;
720 /* Advance the tx pointer. */
721 sc
->sc_nfreetx
-= dmamap
->dm_nsegs
;
722 sc
->sc_nexttx
= nexttx
;
	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HPC1_HDD_CTL_INTR will generate an interrupt on
		 * HPC1.  HPC3 requires HPC3_HDD_CTL_EOPACKET in
		 * addition to HPC3_HDD_CTL_INTR to interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/*
			 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
			 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
			 */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HPC3_HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
			    firsttx));

			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
static void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	sq_seeq_write(sc, SEEQ_TXCMD, 0);
	sq_seeq_write(sc, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
/* Device timeout/watchdog routine. */
static void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}
static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	const char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";		break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";		break;
		case SQ_START_DMA:	act = "SQ_START_DMA";		break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";		break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";		break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER";	break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";		break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";		break;
		case SQ_IOCTL:		act = "SQ_IOCTL";		break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";		break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}
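/*
 * The SQ_TRACE() calls sprinkled through this driver append entries to
 * the sq_trace ring in the softc, recording the action, the descriptor
 * index, the free-descriptor count, the channel status and (presumably
 * via __LINE__ in the macro) the source line.  sq_watchdog() dumps and
 * then resets the log, which makes post-mortem analysis of a wedged
 * DMA ring considerably easier.
 */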
static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);

	if ((stat & 2) == 0)
		SQ_DPRINTF(("%s: Unexpected interrupt!\n",
		    sc->sc_dev.dv_xname));
	else
		sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif

	return (handled);
}
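/*
 * sq_intr() reads its interrupt status through the same register used
 * for the receiver reset: the 0x2 bit apparently latches the pending
 * interrupt, and writing it back acknowledges it.  An interrupt
 * arriving without that bit set is flagged as unexpected above.
 */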
static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf *m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
			    HPC3_HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t *)m->m_data + framelen + 2);
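		/*
		 * The receive buffer is evidently laid out (by
		 * SQ_INIT_RXDESC) as two bytes of padding, so the IP
		 * header ends up longword-aligned, then the frame, then
		 * a trailing status byte written by the Seeq.  The HPC
		 * descriptor holds the residual byte count, so the
		 * arithmetic above recovers the frame length, and the
		 * status byte lives at m_data + framelen + 2.
		 */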
		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    sc->sc_dev.dv_xname, i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", sc->sc_dev.dv_xname, i));
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc,
		    sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}
static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	u_int32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}
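/*
 * The shift used in sq_txintr() reflects a register-layout difference:
 * when the HPC revision is not 3 (i.e. HPC1), the Seeq transmit status
 * evidently sits in the upper 16 bits of the ENETX control word, so
 * shifting by 16 lets the same TXSTAT_* masks serve both revisions.
 */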
/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors, however,
	 * the NDBP register points to the next descriptor that
	 * has not yet been processed.  If DMA is not in progress,
	 * we can safely reclaim all descriptors up to NDBP, and,
	 * if necessary, restart DMA at NDBP.  Otherwise, if DMA
	 * is active, we can only safely reclaim up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	u_int32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;
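	/*
	 * NDBP == sc_prevtx is ambiguous: with the ring completely full
	 * it means every descriptor has been processed, while otherwise
	 * it means none have.  reclaimall resolves the ambiguity by
	 * checking whether any descriptors were actually outstanding.
	 */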
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}
/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted.  We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	u_int32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has paused.
		 */
		status = sq_hpc_read(sc, HPC3_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if (!(sc->sc_txdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE)) {
			if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC3_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC3_ENETX_CTL,
				    HPC3_ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}
static void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
	sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);

	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
	delay(20);
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
}
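/*
 * Writing 3 to the reset register presumably asserts the reset bit
 * (0x1) together with the interrupt-latch bit (0x2) that sq_intr()
 * tests; the delay gives the Seeq time to settle before the reset is
 * deasserted again.
 */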
/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
static int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}
static void
sq_dump_buffer(paddr_t addr, psize_t len)
{
	u_int i;
	u_char *physaddr = (u_char *)MIPS_PHYS_TO_KSEG1(addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}
static void
enaddr_aton(const char *str, u_int8_t *eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}
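/*
 * enaddr_aton() is only ever fed the ARCBIOS 'eaddr' variable, e.g.
 * "08:00:69:0a:0b:0c".  The colon is skipped only when present and
 * each octet consumes exactly two characters, so strings without
 * separators parse just as well; non-hex characters are silently
 * ignored rather than reported as an error.
 */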