/*	$NetBSD: pdq_ifsubr.c,v 1.52 2008/04/08 12:07:27 cegger Exp $	*/

/*-
 * Copyright (c) 1995, 1996 Matt Thomas <matt@3am-software.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Id: pdq_ifsubr.c,v 1.12 1997/06/05 01:56:35 thomas Exp
 */

/*
 * DEC PDQ FDDI Controller; code for BSD derived operating systems
 *
 * This module provides bus-independent, BSD-specific O/S functions
 * (i.e. it provides an ifnet interface to the rest of the system).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pdq_ifsubr.c,v 1.52 2008/04/08 12:07:27 cegger Exp $");

#ifdef __NetBSD__
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#if defined(__FreeBSD__) && BSD < 199401
#include <sys/devconf.h>
#elif defined(__bsdi__) || defined(__NetBSD__)
#include <sys/device.h>
#endif

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#if !defined(__NetBSD__)
#include <net/route.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#if defined(__NetBSD__)
#include <netinet/if_inarp.h>
#endif
#endif

#if defined(__FreeBSD__)
#include <netinet/if_ether.h>
#include <netinet/if_fddi.h>
#else
#include <net/if_fddi.h>
#endif

#if defined(__bsdi__)
#include <netinet/if_ether.h>
#include <i386/isa/isavar.h>
#endif

#ifndef __NetBSD__
#include <vm/vm.h>
#endif

#if defined(__FreeBSD__)
/*
 * Yet another specific ifdef for FreeBSD as it diverges...
 */
#include <dev/pdq/pdqvar.h>
#include <dev/pdq/pdqreg.h>
#else
#include "pdqvar.h"
#include "pdqreg.h"
#endif
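
/*
 * Bring the interface up or down.  The ifnet flags are translated into
 * the corresponding PDQ_* flags (IFF_PROMISC -> PDQ_PROMISC, IFF_LINK1 ->
 * PDQ_PASS_SMT, i.e. SMT frames are passed to the host) and the adapter
 * is then started or stopped via pdq_run()/pdq_stop().
 */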
void
pdq_ifinit(
    pdq_softc_t *sc)
{
    if (sc->sc_if.if_flags & IFF_UP) {
        sc->sc_if.if_flags |= IFF_RUNNING;
#if NBPFILTER > 0
        if (sc->sc_if.if_flags & IFF_PROMISC) {
            sc->sc_pdq->pdq_flags |= PDQ_PROMISC;
        } else {
            sc->sc_pdq->pdq_flags &= ~PDQ_PROMISC;
        }
#endif
        if (sc->sc_if.if_flags & IFF_LINK1) {
            sc->sc_pdq->pdq_flags |= PDQ_PASS_SMT;
        } else {
            sc->sc_pdq->pdq_flags &= ~PDQ_PASS_SMT;
        }
        sc->sc_pdq->pdq_flags |= PDQ_RUNNING;
        pdq_run(sc->sc_pdq);
    } else {
        sc->sc_if.if_flags &= ~IFF_RUNNING;
        sc->sc_pdq->pdq_flags &= ~PDQ_RUNNING;
        pdq_stop(sc->sc_pdq);
    }
}

void
pdq_ifwatchdog(
    struct ifnet *ifp)
{
    /*
     * No progress was made on the transmit queue for PDQ_OS_TX_TIMEOUT
     * seconds.  Remove all queued packets.
     */

    ifp->if_flags &= ~IFF_OACTIVE;
    ifp->if_timer = 0;
    for (;;) {
        struct mbuf *m;
        IFQ_DEQUEUE(&ifp->if_snd, m);
        if (m == NULL)
            return;
        PDQ_OS_DATABUF_FREE(PDQ_OS_IFP_TO_SOFTC(ifp)->sc_pdq, m);
    }
}
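
/*
 * Start output on the interface.  Packets are only IFQ_DEQUEUE()d after
 * pdq_queue_transmit_data() has accepted them, so anything the adapter
 * cannot take yet stays on if_snd and IFF_OACTIVE is set.  PDQIF_DOWNCALL
 * marks that we are inside this routine so pdq_os_restart_transmitter()
 * does not call back into it.
 */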
ifnet_ret_t
pdq_ifstart(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    struct mbuf *m;
    int tx = 0;

    if ((ifp->if_flags & IFF_RUNNING) == 0)
        return;

    if (sc->sc_if.if_timer == 0)
        sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;

    if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) {
        sc->sc_if.if_flags |= IFF_OACTIVE;
        return;
    }
    sc->sc_flags |= PDQIF_DOWNCALL;
    for (;; tx = 1) {
        IFQ_POLL(&ifp->if_snd, m);
        if (m == NULL)
            break;
#if defined(PDQ_BUS_DMA) && !defined(PDQ_BUS_DMA_NOTX)
        if ((m->m_flags & M_HASTXDMAMAP) == 0) {
            bus_dmamap_t map;
            if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
                m->m_data[0] = PDQ_FDDI_PH0;
                m->m_data[1] = PDQ_FDDI_PH1;
                m->m_data[2] = PDQ_FDDI_PH2;
            }
            if (!bus_dmamap_create(sc->sc_dmatag, m->m_pkthdr.len, 255,
                                   m->m_pkthdr.len, 0, BUS_DMA_NOWAIT, &map)) {
                if (!bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
                                          BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
                    bus_dmamap_sync(sc->sc_dmatag, map, 0, m->m_pkthdr.len,
                                    BUS_DMASYNC_PREWRITE);
                    M_SETCTX(m, map);
                    m->m_flags |= M_HASTXDMAMAP;
                }
            }
            if ((m->m_flags & M_HASTXDMAMAP) == 0)
                break;
        }
#else
        if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
            m->m_data[0] = PDQ_FDDI_PH0;
            m->m_data[1] = PDQ_FDDI_PH1;
            m->m_data[2] = PDQ_FDDI_PH2;
        }
#endif

        if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE)
            break;
        IFQ_DEQUEUE(&ifp->if_snd, m);
    }
    if (m != NULL)
        ifp->if_flags |= IFF_OACTIVE;
    if (tx)
        PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq);
    sc->sc_flags &= ~PDQIF_DOWNCALL;
}
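
/*
 * Receive upcall from the chip-independent PDQ code.  Tear down the
 * receive DMA maps (in the PDQ_BUS_DMA case), tap BPF, drop anything
 * that is not an LLC async frame, and pass the rest to the stack
 * through if_input.
 */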
void
pdq_os_receive_pdu(
    pdq_t *pdq,
    struct mbuf *m,
    size_t pktlen,
    int drop)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct fddi_header *fh;

    sc->sc_if.if_ipackets++;
#if defined(PDQ_BUS_DMA)
    {
        /*
         * Even though the first mbuf starts at the first fddi header octet,
         * the dmamap starts PDQ_OS_HDR_OFFSET octets earlier.  Any additional
         * mbufs will start normally.
         */
        int offset = PDQ_OS_HDR_OFFSET;
        struct mbuf *m0;
        for (m0 = m; m0 != NULL; m0 = m0->m_next, offset = 0) {
            pdq_os_databuf_sync(sc, m0, offset, m0->m_len, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
            bus_dmamap_destroy(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
            m0->m_flags &= ~M_HASRXDMAMAP;
            M_SETCTX(m0, NULL);
        }
    }
#endif
    m->m_pkthdr.len = pktlen;
#if NBPFILTER > 0
    if (sc->sc_bpf != NULL)
        PDQ_BPF_MTAP(sc, m);
#endif
    fh = mtod(m, struct fddi_header *);
    if (drop || (fh->fddi_fc & (FDDIFC_L|FDDIFC_F)) != FDDIFC_LLC_ASYNC) {
        PDQ_OS_DATABUF_FREE(pdq, m);
        return;
    }

    m->m_pkthdr.rcvif = &sc->sc_if;
    (*sc->sc_if.if_input)(&sc->sc_if, m);
}
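
/*
 * Upcall from the chip-independent code once transmit resources are
 * available again: clear IFF_OACTIVE and restart output, unless we are
 * already inside pdq_ifstart() (PDQIF_DOWNCALL).
 */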
void
pdq_os_restart_transmitter(
    pdq_t *pdq)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    sc->sc_if.if_flags &= ~IFF_OACTIVE;
    if (IFQ_IS_EMPTY(&sc->sc_if.if_snd) == 0) {
        sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;
        if ((sc->sc_flags & PDQIF_DOWNCALL) == 0)
            pdq_ifstart(&sc->sc_if);
    } else {
        sc->sc_if.if_timer = 0;
    }
}
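
/*
 * A transmit has completed: give BPF a copy and free the packet.
 */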
void
pdq_os_transmit_done(
    pdq_t *pdq,
    struct mbuf *m)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
#if NBPFILTER > 0
    if (sc->sc_bpf != NULL)
        PDQ_BPF_MTAP(sc, m);
#endif
    PDQ_OS_DATABUF_FREE(pdq, m);
    sc->sc_if.if_opackets++;
}
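
/*
 * Load the adapter's CAM with the currently joined multicast addresses.
 * A multicast range entry, or more addresses than there are CAM slots,
 * forces all-multicast mode (PDQ_ALLMULTI / IFF_ALLMULTI).
 */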
void
pdq_os_addr_fill(
    pdq_t *pdq,
    pdq_lanaddr_t *addr,
    size_t num_addrs)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct ether_multistep step;
    struct ether_multi *enm;

    /*
     * ADDR_FILTER_SET is always issued before FILTER_SET so
     * we can play with PDQ_ALLMULTI and not worry about
     * queueing a FILTER_SET ourselves.
     */

    pdq->pdq_flags &= ~PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
    sc->sc_if.if_flags &= ~IFF_ALLMULTI;
#endif

    ETHER_FIRST_MULTI(step, PDQ_FDDICOM(sc), enm);
    while (enm != NULL && num_addrs > 0) {
        if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) == 0) {
            ((u_short *) addr->lanaddr_bytes)[0] = ((u_short *) enm->enm_addrlo)[0];
            ((u_short *) addr->lanaddr_bytes)[1] = ((u_short *) enm->enm_addrlo)[1];
            ((u_short *) addr->lanaddr_bytes)[2] = ((u_short *) enm->enm_addrlo)[2];
            addr++;
            num_addrs--;
        } else {
            pdq->pdq_flags |= PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
            sc->sc_if.if_flags |= IFF_ALLMULTI;
#endif
        }
        ETHER_NEXT_MULTI(step, enm);
    }
    /*
     * If not all the addresses fit into the CAM, turn on all-multicast mode.
     */
    if (enm != NULL) {
        pdq->pdq_flags |= PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
        sc->sc_if.if_flags |= IFF_ALLMULTI;
#endif
    }
}
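
/*
 * ifmedia support.  Full-duplex is the only selectable option; the
 * active media word is rebuilt from the PMD type the adapter reports
 * (see pdq_os_update_status()).
 */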
#if defined(IFM_FDDI)
static int
pdq_ifmedia_change(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);

    if (sc->sc_ifmedia.ifm_media & IFM_FDX) {
        if ((sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) == 0) {
            sc->sc_pdq->pdq_flags |= PDQ_WANT_FDX;
            if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
                pdq_run(sc->sc_pdq);
        }
    } else if (sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) {
        sc->sc_pdq->pdq_flags &= ~PDQ_WANT_FDX;
        if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
            pdq_run(sc->sc_pdq);
    }

    return 0;
}

static void
pdq_ifmedia_status(
    struct ifnet *ifp,
    struct ifmediareq *ifmr)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);

    ifmr->ifm_status = IFM_AVALID;
    if (sc->sc_pdq->pdq_flags & PDQ_IS_ONRING)
        ifmr->ifm_status |= IFM_ACTIVE;

    ifmr->ifm_active = (ifmr->ifm_current & ~IFM_FDX);
    if (sc->sc_pdq->pdq_flags & PDQ_IS_FDX)
        ifmr->ifm_active |= IFM_FDX;
}

void
pdq_os_update_status(
    pdq_t *pdq,
    const void *arg)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    const pdq_response_status_chars_get_t *rsp = arg;
    int media = 0;

    switch (rsp->status_chars_get.pmd_type[0]) {
    case PDQ_PMD_TYPE_ANSI_MUTLI_MODE:         media = IFM_FDDI_MMF; break;
    case PDQ_PMD_TYPE_ANSI_SINGLE_MODE_TYPE_1: media = IFM_FDDI_SMF; break;
    case PDQ_PMD_TYPE_ANSI_SIGNLE_MODE_TYPE_2: media = IFM_FDDI_SMF; break;
    case PDQ_PMD_TYPE_UNSHIELDED_TWISTED_PAIR: media = IFM_FDDI_UTP; break;
    default: media |= IFM_MANUAL;
    }

    if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
        media |= IFM_FDDI_DA;

    sc->sc_ifmedia.ifm_media = media | IFM_FDDI;
}
#endif /* defined(IFM_FDDI) */
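
/*
 * Interface ioctl handler: address initialization, flag changes,
 * multicast list updates, MTU changes and ifmedia requests.
 */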
int
pdq_ifioctl(
    struct ifnet *ifp,
    ioctl_cmd_t cmd,
    void *data)
{
    pdq_softc_t *sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    int s, error = 0;

    s = PDQ_OS_SPL_RAISE();

    switch (cmd) {
        case SIOCINITIFADDR: {
            struct ifaddr *ifa = (struct ifaddr *)data;

            ifp->if_flags |= IFF_UP;
            pdq_ifinit(sc);
            switch (ifa->ifa_addr->sa_family) {
#if defined(INET)
                case AF_INET:
                    PDQ_ARP_IFINIT(sc, ifa);
                    break;
#endif /* INET */
                default:
                    break;
            }
            break;
        }

        case SIOCSIFFLAGS: {
            if ((error = ifioctl_common(ifp, cmd, data)) != 0)
                break;
            pdq_ifinit(sc);
            break;
        }

        case SIOCADDMULTI:
        case SIOCDELMULTI: {
            /*
             * Update multicast listeners
             */
            if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
                if (sc->sc_if.if_flags & IFF_RUNNING)
                    pdq_run(sc->sc_pdq);
                error = 0;
            }
            break;
        }

#if defined(SIOCSIFMTU)
#if !defined(ifr_mtu)
#define ifr_mtu ifr_metric
#endif
        case SIOCSIFMTU: {
            struct ifreq *ifr = (struct ifreq *)data;
            /*
             * Set the interface MTU.
             */
            if (ifr->ifr_mtu > FDDIMTU) {
                error = EINVAL;
                break;
            }
            if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
                error = 0;
            break;
        }
#endif /* SIOCSIFMTU */

#if defined(IFM_FDDI) && defined(SIOCSIFMEDIA)
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA: {
            struct ifreq *ifr = (struct ifreq *)data;
            error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
            break;
        }
#endif

        default: {
            error = ether_ioctl(ifp, cmd, data);
            break;
        }
    }

    PDQ_OS_SPL_LOWER(s);
    return error;
}

#ifndef IFF_NOTRAILERS
#define IFF_NOTRAILERS 0
#endif
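
/*
 * Attach the ifnet: install the start/ioctl/watchdog handlers, set up
 * the optional ifmedia instance, and register the interface (and its
 * hardware address) with the FDDI layer.
 */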
void
pdq_ifattach(
    pdq_softc_t *sc,
    ifnet_ret_t (*ifwatchdog)(int unit))
{
    struct ifnet *ifp = &sc->sc_if;

    ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;

#if (defined(__FreeBSD__) && BSD >= 199506) || defined(__NetBSD__)
    ifp->if_watchdog = pdq_ifwatchdog;
#else
    ifp->if_watchdog = ifwatchdog;
#endif

    ifp->if_ioctl = pdq_ifioctl;
#if !defined(__NetBSD__)
    ifp->if_output = fddi_output;
#endif
    ifp->if_start = pdq_ifstart;
    IFQ_SET_READY(&ifp->if_snd);

#if defined(IFM_FDDI)
    {
        const int media = sc->sc_ifmedia.ifm_media;
        ifmedia_init(&sc->sc_ifmedia, IFM_FDX,
                     pdq_ifmedia_change, pdq_ifmedia_status);
        ifmedia_add(&sc->sc_ifmedia, media, 0, 0);
        ifmedia_set(&sc->sc_ifmedia, media);
    }
#endif

    if_attach(ifp);
#if defined(__NetBSD__)
    fddi_ifattach(ifp, (void *)&sc->sc_pdq->pdq_hwaddr);
#else
    fddi_ifattach(ifp);
#endif
}

#if defined(PDQ_BUS_DMA)
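/*
 * Allocate and map the DMA-able descriptor block, the unsolicited event
 * buffer and the consumer block.  Each successful step advances the
 * "steps" counter so that a later failure can unwind exactly what has
 * already been set up (see the switch at the end of the function).
 */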
int
pdq_os_memalloc_contig(
    pdq_t *pdq)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    bus_dma_segment_t db_segs[1], ui_segs[1], cb_segs[1];
    int db_nsegs = 0, ui_nsegs = 0;
    int steps = 0;
    int not_ok;

    not_ok = bus_dmamem_alloc(sc->sc_dmatag,
                sizeof(*pdq->pdq_dbp), sizeof(*pdq->pdq_dbp),
                sizeof(*pdq->pdq_dbp), db_segs, 1, &db_nsegs,
#if defined(__sparc__) || defined(__sparc64__)
                BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
#else
                BUS_DMA_NOWAIT);
#endif
    if (!not_ok) {
        steps = 1;
        not_ok = bus_dmamem_map(sc->sc_dmatag, db_segs, db_nsegs,
                    sizeof(*pdq->pdq_dbp), (void **) &pdq->pdq_dbp,
                    BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        steps = 2;
        not_ok = bus_dmamap_create(sc->sc_dmatag, db_segs[0].ds_len, 1,
                    0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_dbmap);
    }
    if (!not_ok) {
        steps = 3;
        not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_dbmap,
                    pdq->pdq_dbp, sizeof(*pdq->pdq_dbp),
                    NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        steps = 4;
        pdq->pdq_pa_descriptor_block = sc->sc_dbmap->dm_segs[0].ds_addr;
        not_ok = bus_dmamem_alloc(sc->sc_dmatag,
                    PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE,
                    ui_segs, 1, &ui_nsegs, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        steps = 5;
        not_ok = bus_dmamem_map(sc->sc_dmatag, ui_segs, ui_nsegs,
                    PDQ_OS_PAGESIZE,
                    (void **) &pdq->pdq_unsolicited_info.ui_events,
                    BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        steps = 6;
        not_ok = bus_dmamap_create(sc->sc_dmatag, ui_segs[0].ds_len, 1,
                    PDQ_OS_PAGESIZE, 0, BUS_DMA_NOWAIT,
                    &sc->sc_uimap);
    }
    if (!not_ok) {
        steps = 7;
        not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_uimap,
                    pdq->pdq_unsolicited_info.ui_events,
                    PDQ_OS_PAGESIZE, NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        steps = 8;
        pdq->pdq_unsolicited_info.ui_pa_bufstart = sc->sc_uimap->dm_segs[0].ds_addr;
        cb_segs[0] = db_segs[0];
        cb_segs[0].ds_addr += offsetof(pdq_descriptor_block_t, pdqdb_consumer);
        cb_segs[0].ds_len = sizeof(pdq_consumer_block_t);
#if defined(__sparc__) || defined(__sparc64__)
        pdq->pdq_cbp = (pdq_consumer_block_t*)((unsigned long int)pdq->pdq_dbp +
            (unsigned long int)offsetof(pdq_descriptor_block_t,pdqdb_consumer));
#else
        not_ok = bus_dmamem_map(sc->sc_dmatag, cb_segs, 1,
                    sizeof(*pdq->pdq_cbp),
                    (void **)&pdq->pdq_cbp,
                    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
#endif
    }
    if (!not_ok) {
        steps = 9;
        not_ok = bus_dmamap_create(sc->sc_dmatag, cb_segs[0].ds_len, 1,
                    0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_cbmap);
    }
    if (!not_ok) {
        steps = 10;
        not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_cbmap,
                    pdq->pdq_cbp, sizeof(*pdq->pdq_cbp),
                    NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        pdq->pdq_pa_consumer_block = sc->sc_cbmap->dm_segs[0].ds_addr;
        return not_ok;
    }
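
    /*
     * Some step failed: unwind, in reverse order, whatever was set up
     * before the failure.  "steps" never exceeds 10 above, so the
     * case 11 entry below is never reached as written.
     */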
    switch (steps) {
        case 11: {
            bus_dmamap_unload(sc->sc_dmatag, sc->sc_cbmap);
            /* FALL THROUGH */
        }
        case 10: {
            bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cbmap);
            /* FALL THROUGH */
        }
        case 9: {
            bus_dmamem_unmap(sc->sc_dmatag,
                (void *)pdq->pdq_cbp, sizeof(*pdq->pdq_cbp));
            /* FALL THROUGH */
        }
        case 8: {
            bus_dmamap_unload(sc->sc_dmatag, sc->sc_uimap);
            /* FALL THROUGH */
        }
        case 7: {
            bus_dmamap_destroy(sc->sc_dmatag, sc->sc_uimap);
            /* FALL THROUGH */
        }
        case 6: {
            bus_dmamem_unmap(sc->sc_dmatag,
                (void *) pdq->pdq_unsolicited_info.ui_events,
                PDQ_OS_PAGESIZE);
            /* FALL THROUGH */
        }
        case 5: {
            bus_dmamem_free(sc->sc_dmatag, ui_segs, ui_nsegs);
            /* FALL THROUGH */
        }
        case 4: {
            bus_dmamap_unload(sc->sc_dmatag, sc->sc_dbmap);
            /* FALL THROUGH */
        }
        case 3: {
            bus_dmamap_destroy(sc->sc_dmatag, sc->sc_dbmap);
            /* FALL THROUGH */
        }
        case 2: {
            bus_dmamem_unmap(sc->sc_dmatag,
                (void *) pdq->pdq_dbp,
                sizeof(*pdq->pdq_dbp));
            /* FALL THROUGH */
        }
        case 1: {
            bus_dmamem_free(sc->sc_dmatag, db_segs, db_nsegs);
            /* FALL THROUGH */
        }
    }

    return not_ok;
}
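
/*
 * Thin wrappers around bus_dmamap_sync() for the descriptor block, the
 * consumer block, the unsolicited event buffer and the per-mbuf receive
 * data buffers.
 */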
extern void
pdq_os_descriptor_block_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_dbmap, offset, length, ops);
}

extern void
pdq_os_consumer_block_sync(
    pdq_os_ctx_t *sc,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_cbmap, 0, sizeof(pdq_consumer_block_t), ops);
}

extern void
pdq_os_unsolicited_event_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_uimap, offset, length, ops);
}

extern void
pdq_os_databuf_sync(
    pdq_os_ctx_t *sc,
    struct mbuf *m,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, M_GETCTX(m, bus_dmamap_t), offset, length, ops);
}
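
/*
 * Receive buffer management: pdq_os_databuf_alloc() hands back an mbuf
 * cluster with its own DMA map already created and loaded for reading,
 * and pdq_os_databuf_free() releases the map before freeing the mbuf.
 */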
extern void
pdq_os_databuf_free(
    pdq_os_ctx_t *sc,
    struct mbuf *m)
{
    if (m->m_flags & (M_HASRXDMAMAP|M_HASTXDMAMAP)) {
        bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
        bus_dmamap_unload(sc->sc_dmatag, map);
        bus_dmamap_destroy(sc->sc_dmatag, map);
        m->m_flags &= ~(M_HASRXDMAMAP|M_HASTXDMAMAP);
    }
    m_freem(m);
}

extern struct mbuf *
pdq_os_databuf_alloc(
    pdq_os_ctx_t *sc)
{
    struct mbuf *m;
    bus_dmamap_t map;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL) {
        aprint_error_dev(&sc->sc_dev, "can't alloc small buf\n");
        return NULL;
    }
    MCLGET(m, M_DONTWAIT);
    if ((m->m_flags & M_EXT) == 0) {
        aprint_error_dev(&sc->sc_dev, "can't alloc cluster\n");
        m_free(m);
        return NULL;
    }
    MCLAIM(m, &PDQ_FDDICOM(sc)->ec_rx_mowner);
    m->m_pkthdr.len = m->m_len = PDQ_OS_DATABUF_SIZE;

    if (bus_dmamap_create(sc->sc_dmatag, PDQ_OS_DATABUF_SIZE,
            1, PDQ_OS_DATABUF_SIZE, 0, BUS_DMA_NOWAIT, &map)) {
        aprint_error_dev(&sc->sc_dev, "can't create dmamap\n");
        m_free(m);
        return NULL;
    }
    if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
            BUS_DMA_READ|BUS_DMA_NOWAIT)) {
        aprint_error_dev(&sc->sc_dev, "can't load dmamap\n");
        bus_dmamap_destroy(sc->sc_dmatag, map);
        m_free(m);
        return NULL;
    }
    m->m_flags |= M_HASRXDMAMAP;
    M_SETCTX(m, map);
    return m;
}
#endif /* defined(PDQ_BUS_DMA) */