/*	$NetBSD: if_dmc.c,v 1.19 2009/05/12 13:19:12 cegger Exp $	*/
/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if_dmc.c	7.10 (Berkeley) 12/16/90
 */

/*
 * DMC11 device driver, internet version
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_dmc.c,v 1.19 2009/05/12 13:19:12 cegger Exp $");

#undef DMCDEBUG		/* for base table dump on fatal error */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_var.h>

#include <sys/bus.h>

#include <dev/qbus/ubareg.h>
#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_uba.h>

#include <dev/qbus/if_dmcreg.h>

/*
 * output timeout value, sec.; should depend on line speed.
 */
static int dmc_timeout = 20;

#define NRCV	7
#define NXMT	3
#define NCMDS	(NRCV+NXMT+4)	/* size of command queue */

#define DMC_WBYTE(csr, val) \
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, csr, val)
#define DMC_WWORD(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define DMC_RBYTE(csr) \
	bus_space_read_1(sc->sc_iot, sc->sc_ioh, csr)
#define DMC_RWORD(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
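
/*
 * The register access macros above assume a local ``sc'' pointing at the
 * softc of the unit being touched; for example, DMC_WBYTE(DMC_BSEL1, DMC_MCLR)
 * expands to bus_space_write_1(sc->sc_iot, sc->sc_ioh, DMC_BSEL1, DMC_MCLR).
 */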

#ifdef DMCDEBUG
#define printd if(dmcdebug)printf
int dmcdebug = 0;
#else
#define printd if(0)printf
#endif

/* number of times to loop waiting for RDYI after a command is started */
#define RDYSCAN	16

/* error reporting intervals */
#define DMC_RPNBFS	50
#define DMC_RPDSC	1
#define DMC_RPTMO	10
#define DMC_RPDCK	10

struct dmc_command {
	char	qp_cmd;		/* command */
	short	qp_ubaddr;	/* buffer address */
	short	qp_cc;		/* character count || XMEM */
	struct	dmc_command *qp_next;	/* next command on queue */
};

struct dmcbufs {
	int	ubinfo;		/* from uballoc */
	short	cc;		/* buffer size */
	short	flags;		/* access control */
};
#define	DBUF_OURS	0	/* buffer is available */
#define	DBUF_DMCS	1	/* buffer claimed by somebody */
#define	DBUF_XMIT	4	/* transmit buffer */
#define	DBUF_RCV	8	/* receive buffer */
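
/*
 * A buffer starts out DBUF_OURS, has DBUF_DMCS set when it is handed to the
 * DMC (dmcload with DMC_READ or DMC_WRITE below), and has DBUF_DMCS cleared
 * again in dmcxint when a transmit completes; receive buffers are handed
 * straight back to the DMC after their contents have been passed upward.
 */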

/*
 * DMC software status per interface.
 *
 * Each interface is referenced by a network interface structure,
 * sc_if, which the routing code uses to locate the interface.
 * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a set of 7 UBA interface structures
 * which contain information about the UNIBUS resources held by the interface:
 * map registers, buffered data paths, etc.  Information is cached in this
 * structure for use by the if_uba.c routines in running the interface
 * efficiently.
 */
struct dmc_softc {
	struct	device sc_dev;		/* Configuration common part */
	struct	ifnet sc_if;		/* network-visible interface */
	short	sc_oused;		/* output buffers currently in use */
	short	sc_iused;		/* input buffers given to DMC */
	short	sc_flag;		/* flags */
	struct	ubinfo sc_ui;		/* UBA mapping info for base table */
	int	sc_errors[4];		/* non-fatal error counters */
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t sc_dmat;
	struct	evcnt sc_rintrcnt;	/* Interrupt counting */
	struct	evcnt sc_tintrcnt;	/* Interrupt counting */
#define	sc_datck sc_errors[0]
#define	sc_timeo sc_errors[1]
#define	sc_nobuf sc_errors[2]
#define	sc_disc	 sc_errors[3]
	struct	dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
	struct	dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
	struct	ifubinfo sc_ifuba;	/* UNIBUS resources */
	struct	ifrw sc_ifr[NRCV];	/* UNIBUS receive buffer maps */
	struct	ifxmt sc_ifw[NXMT];	/* UNIBUS transmit buffer maps */
	/* command queue stuff */
	struct	dmc_command sc_cmdbuf[NCMDS];
	struct	dmc_command *sc_qhead;	/* head of command queue */
	struct	dmc_command *sc_qtail;	/* tail of command queue */
	struct	dmc_command *sc_qactive;	/* command in progress */
	struct	dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
	struct	dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
	/* end command queue stuff */
	struct dmc_base {
		short	d_base[128];	/* DMC base table */
	} dmc_base;
};

static int  dmcmatch(device_t, cfdata_t, void *);
static void dmcattach(device_t, device_t, void *);
static int  dmcinit(struct ifnet *);
static void dmcrint(void *);
static void dmcxint(void *);
static void dmcdown(struct dmc_softc *sc);
static void dmcrestart(struct dmc_softc *);
static void dmcload(struct dmc_softc *, int, u_short, u_short);
static void dmcstart(struct ifnet *);
static void dmctimeout(struct ifnet *);
static int  dmcioctl(struct ifnet *, u_long, void *);
static int  dmcoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
	struct rtentry *);
static void dmcreset(device_t);

CFATTACH_DECL(dmc, sizeof(struct dmc_softc),
    dmcmatch, dmcattach, NULL, NULL);

#define DMC_RUNNING	0x01	/* device initialized */
#define DMC_BMAPPED	0x02	/* base table mapped */
#define DMC_RESTART	0x04	/* software restart in progress */
#define DMC_ONLINE	0x08	/* device running (had a RDYO) */
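
/*
 * sc_flag life cycle: dmcinit sets DMC_RUNNING (and DMC_BMAPPED once the
 * base table has been mapped), dmcxint sets DMC_ONLINE when the first
 * transmit completes, DMC_RESTART marks a software restart begun from the
 * soft-error path in dmcxint and is cleared in dmcrestart, and dmcdown
 * clears DMC_RUNNING and DMC_ONLINE again.
 */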

/* queue manipulation macros */
#define	QUEUE_AT_HEAD(qp, head, tail) \
	(qp)->qp_next = (head); \
	(head) = (qp); \
	if ((tail) == (struct dmc_command *) 0) \
		(tail) = (head)

#define QUEUE_AT_TAIL(qp, head, tail) \
	if ((tail)) \
		(tail)->qp_next = (qp); \
	else \
		(head) = (qp); \
	(qp)->qp_next = (struct dmc_command *) 0; \
	(tail) = (qp)

#define DEQUEUE(head, tail) \
	(head) = (head)->qp_next;\
	if ((head) == (struct dmc_command *) 0)\
		(tail) = (head)
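
/*
 * Typical use of the queue macros, as in dmcload() below: commands are
 * singly linked through qp_next, with a free list and a pending list each
 * tracked by a (head, tail) pair.
 *
 *	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);		   grab a free buffer
 *	QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);	   queue behind others
 *	QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);   return it when done
 */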

static int
dmcmatch(device_t parent, cfdata_t cf, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct dmc_softc ssc;
	struct dmc_softc *sc = &ssc;
	int i;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;

	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		printf("dmcprobe: can't start device\n");
		return 0;
	}
	DMC_WBYTE(DMC_BSEL0, DMC_RQI|DMC_IEI);
	/* let's be paranoid */
	DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | DMC_RQI|DMC_IEI);
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	return 1;
}

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
static void
dmcattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct dmc_softc *sc = device_private(self);

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	strlcpy(sc->sc_if.if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	sc->sc_if.if_mtu = DMCMTU;
	sc->sc_if.if_init = dmcinit;
	sc->sc_if.if_output = dmcoutput;
	sc->sc_if.if_ioctl = dmcioctl;
	sc->sc_if.if_watchdog = dmctimeout;
	sc->sc_if.if_flags = IFF_POINTOPOINT;
	sc->sc_if.if_softc = sc;
	IFQ_SET_READY(&sc->sc_if.if_snd);

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, dmcrint, sc,
	    &sc->sc_rintrcnt);
	uba_intr_establish(ua->ua_icookie, ua->ua_cvec+4, dmcxint, sc,
	    &sc->sc_tintrcnt);
	uba_reset_establish(dmcreset, &sc->sc_dev);
	evcnt_attach_dynamic(&sc->sc_rintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(&sc->sc_dev), "intr");
	evcnt_attach_dynamic(&sc->sc_tintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(&sc->sc_dev), "intr");

	if_attach(&sc->sc_if);
}

/*
 * Reset of interface after UNIBUS reset.
 * If interface is on specified UBA, reset its state.
 */
static void
dmcreset(device_t dev)
{
	struct dmc_softc *sc = (struct dmc_softc *)dev;

	sc->sc_if.if_flags &= ~IFF_RUNNING;
	dmcinit(&sc->sc_if);
}

/*
 * Initialization of interface; reinitialize UNIBUS usage.
 */
static int
dmcinit(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	struct ifrw *ifrw;
	struct ifxmt *ifxp;
	struct dmcbufs *rp;
	struct dmc_command *qp;
	struct ifaddr *ifa;
	cfdata_t ui = device_cfdata(&sc->sc_dev);
	int base;

	/*
	 * Check to see that an address has been set
	 * (both local and destination for an address family).
	 */
	IFADDR_FOREACH(ifa, ifp)
		if (ifa->ifa_addr->sa_family && ifa->ifa_dstaddr->sa_family)
			break;
	if (ifa == (struct ifaddr *) 0)
		return 0;

	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		printf("dmcinit: DMC not running\n");
		ifp->if_flags &= ~IFF_UP;
		return 0;
	}

	/* map the base table if that has not been done yet */
	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
		sc->sc_ui.ui_size = sizeof(struct dmc_base);
		sc->sc_ui.ui_vaddr = (void *)&sc->dmc_base;
		uballoc((void *)device_parent(&sc->sc_dev), &sc->sc_ui, 0);
		sc->sc_flag |= DMC_BMAPPED;
	}

	/* initialize UNIBUS resources */
	sc->sc_iused = sc->sc_oused = 0;
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (if_ubaminit(&sc->sc_ifuba,
		    (void *)device_parent(&sc->sc_dev),
		    sizeof(struct dmc_header) + DMCMTU,
		    sc->sc_ifr, NRCV, sc->sc_ifw, NXMT) == 0) {
			aprint_error_dev(&sc->sc_dev,
			    "can't allocate uba resources\n");
			ifp->if_flags &= ~IFF_UP;
			return 0;
		}
		ifp->if_flags |= IFF_RUNNING;
	}
	sc->sc_flag &= ~DMC_ONLINE;
	sc->sc_flag |= DMC_RUNNING;
	/*
	 * Limit packets enqueued until we see if we're on the air.
	 */
	ifp->if_snd.ifq_maxlen = 3;

	/* initialize buffer pool */
	ifrw = &sc->sc_ifr[0];
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->ubinfo = ifrw->ifrw_info;
		rp->cc = DMCMTU + sizeof (struct dmc_header);
		rp->flags = DBUF_OURS|DBUF_RCV;
		ifrw++;
	}
	ifxp = &sc->sc_ifw[0];
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		rp->ubinfo = ifxp->ifw_info;
		rp->flags = DBUF_OURS|DBUF_XMIT;
		ifxp++;
	}

	/* set up command queues */
	sc->sc_qfreeh = sc->sc_qfreet
		= sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
		(struct dmc_command *)0;
	/* set up free command buffer list */
	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
	}

	/* tell the DMC where its base table lives */
	base = sc->sc_ui.ui_baddr;
	dmcload(sc, DMC_BASEI, (u_short)base, (base>>2) & DMC_XMEM);

	/*
	 * The config-file flags select the DDCMP mode: full duplex,
	 * maintenance, or half duplex as primary or secondary station.
	 */
	if (ui->cf_flags == 0)
		/* use DDCMP mode in full duplex */
		dmcload(sc, DMC_CNTLI, 0, 0);
	else if (ui->cf_flags == 1)
		/* use MAINTENANCE mode */
		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT);
	else if (ui->cf_flags == 2)
		/* use DDCMP half duplex as primary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
	else if (ui->cf_flags == 3)
		/* use DDCMP half duplex as secondary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

	/* enable operation done interrupts */
	while ((DMC_RBYTE(DMC_BSEL2) & DMC_IEO) == 0)
		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) | DMC_IEO);

	/* queue first NRCV buffers for DMC to fill */
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->flags |= DBUF_DMCS;
		dmcload(sc, DMC_READ, rp->ubinfo,
		    (((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
		sc->sc_iused++;
	}
	return 0;
}

/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5
 */
static void
dmcstart(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	struct mbuf *m;
	struct dmcbufs *rp;
	int n;

	/*
	 * Dequeue up to NXMT requests and map them to the UNIBUS.
	 * If no more requests, or no dmc buffers available, just return.
	 */
	n = 0;
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		/* find an available buffer */
		if ((rp->flags & DBUF_DMCS) == 0) {
			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
			if (m == NULL)
				return;
			/* mark it busy (claimed by the DMC) */
			rp->flags |= (DBUF_DMCS);
			/*
			 * Have request mapped to UNIBUS for transmission
			 * and start the output.
			 */
			rp->cc = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[n], m);
			rp->cc &= DMC_CCOUNT;
			if (++sc->sc_oused == 1)
				sc->sc_if.if_timer = dmc_timeout;
			dmcload(sc, DMC_WRITE, rp->ubinfo,
			    rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
		}
		n++;
	}
}

/*
 * Utility routine to load the DMC device registers.
 */
static void
dmcload(struct dmc_softc *sc, int type, u_short w0, u_short w1)
{
	struct dmc_command *qp;

	/* grab a command buffer from the free list */
	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
		panic("dmc command queue overflow");
	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);

	/* fill in requested info */
	qp->qp_cmd = (type | DMC_RQI);
	qp->qp_ubaddr = w0;
	qp->qp_cc = w1;

	if (sc->sc_qactive) {	/* command in progress */
		if (type == DMC_READ) {
			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
		} else {
			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
		}
	} else {	/* command port free */
		sc->sc_qactive = qp;
		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
		dmcrint(sc);
	}
}

/*
 * DMC interface receiver interrupt.
 * Ready to accept another command,
 * pull one off the command queue.
 */
static void
dmcrint(void *arg)
{
	struct dmc_softc *sc = arg;
	struct dmc_command *qp;
	int n;

	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
		printf("%s: dmcrint no command\n", device_xname(&sc->sc_dev));
		return;
	}
	while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
		DMC_WWORD(DMC_SEL4, qp->qp_ubaddr);
		DMC_WWORD(DMC_SEL6, qp->qp_cc);
		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & ~(DMC_IEI|DMC_RQI));
		/* free command buffer */
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
		while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
			/*
			 * Can't check for RDYO here 'cause
			 * this routine isn't reentrant!
			 */
			;
		}
		/* move on to next command */
		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
			break;		/* all done */
		/* more commands to do, start the next one */
		qp = sc->sc_qactive;
		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
		n = RDYSCAN;
		while (n-- > 0)
			if ((DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) ||
			    (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO))
				break;
	}
	if (sc->sc_qactive) {
		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | (DMC_IEI|DMC_RQI));
		/* VMS does it twice !*$%@# */
		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | (DMC_IEI|DMC_RQI));
	}
}

/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 */
static void
dmcxint(void *a)
{
	struct dmc_softc *sc = a;
	struct ifnet *ifp;
	struct mbuf *m;
	struct ifqueue *inq;
	int arg, pkaddr, cmd, len, s;
	struct ifrw *ifrw;
	struct dmcbufs *rp;
	struct ifxmt *ifxp;
	struct dmc_header *dh;
	char buf[64];

	ifp = &sc->sc_if;

	while (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO) {

		cmd = DMC_RBYTE(DMC_BSEL2) & 0xff;
		arg = DMC_RWORD(DMC_SEL6) & 0xffff;
		/* reconstruct UNIBUS address of buffer returned to us */
		pkaddr = ((arg&DMC_XMEM)<<2) | (DMC_RWORD(DMC_SEL4) & 0xffff);

		/* release the output port */
		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) & ~DMC_RDYO);

		switch (cmd & 07) {

		case DMC_OUR:
			/*
			 * A read has completed.
			 * Pass packet to type specific
			 * higher-level input routine.
			 */
			ifp->if_ipackets++;
			/* find location in dmcuba struct */
			ifrw = &sc->sc_ifr[0];
			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV];
			    rp++) {
				if (rp->ubinfo == pkaddr)
					break;
				ifrw++;
			}
			if (rp >= &sc->sc_rbufs[NRCV])
				panic("dmc rcv");
			if ((rp->flags & DBUF_DMCS) == 0)
				aprint_error_dev(&sc->sc_dev,
				    "done unalloc rbuf\n");

			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
			if (len < 0 || len > DMCMTU) {
				ifp->if_ierrors++;
				printd("%s: bad rcv pkt addr 0x%x len 0x%x\n",
				    device_xname(&sc->sc_dev), pkaddr, len);
				goto setup;
			}
			/*
			 * Deal with trailer protocol: if type is trailer
			 * get true type from first 16-bit word past data.
			 * Remember that type was trailer by setting off.
			 */
			dh = (struct dmc_header *)ifrw->ifrw_addr;
			dh->dmc_type = ntohs((u_short)dh->dmc_type);
			if (len == 0)
				goto setup;

			/*
			 * Pull packet off interface.  Off is nonzero if
			 * packet has trailing header; dmc_get will then
			 * force this header information to be at the front,
			 * but we still have to drop the type and length
			 * which are at the front of any trailer data.
			 */
			m = if_ubaget(&sc->sc_ifuba, ifrw, ifp, len);
			if (m == NULL)
				goto setup;
			/* Shave off dmc_header */
			m_adj(m, sizeof(struct dmc_header));
			switch (dh->dmc_type) {
#ifdef INET
			case DMC_IPTYPE:
				schednetisr(NETISR_IP);
				inq = &ipintrq;
				break;
#endif
			default:
				m_freem(m);
				goto setup;
			}

			s = splnet();
			if (IF_QFULL(inq)) {
				IF_DROP(inq);
				m_freem(m);
			} else
				IF_ENQUEUE(inq, m);
			splx(s);

	setup:
			/* is this needed? */
			rp->ubinfo = ifrw->ifrw_info;

			dmcload(sc, DMC_READ, rp->ubinfo,
			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
			break;

		case DMC_OUX:
			/*
			 * A write has completed, start another
			 * transfer if there is more data to send.
			 */
			ifp->if_opackets++;
			/* find associated dmcbuf structure */
			ifxp = &sc->sc_ifw[0];
			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT];
			    rp++) {
				if (rp->ubinfo == pkaddr)
					break;
				ifxp++;
			}
			if (rp >= &sc->sc_xbufs[NXMT]) {
				aprint_error_dev(&sc->sc_dev,
				    "bad packet address 0x%x\n", pkaddr);
				break;
			}
			if ((rp->flags & DBUF_DMCS) == 0)
				aprint_error_dev(&sc->sc_dev,
				    "unallocated packet 0x%x\n", pkaddr);
			/* mark buffer free */
			if_ubaend(&sc->sc_ifuba, ifxp);
			rp->flags &= ~DBUF_DMCS;
			if (--sc->sc_oused == 0)
				sc->sc_if.if_timer = 0;
			else
				sc->sc_if.if_timer = dmc_timeout;
			if ((sc->sc_flag & DMC_ONLINE) == 0) {
				extern int ifqmaxlen;

				/*
				 * We're on the air.
				 * Open the queue to the usual value.
				 */
				sc->sc_flag |= DMC_ONLINE;
				ifp->if_snd.ifq_maxlen = ifqmaxlen;
			}
			break;

		case DMC_CNTLO:
			if (arg & DMC_FATAL) {
				if (arg != DMC_START) {
					snprintb(buf, sizeof(buf), CNTLO_BITS,
					    arg);
					log(LOG_ERR,
					    "%s: fatal error, flags=%s\n",
					    device_xname(&sc->sc_dev), buf);
				}
				dmcrestart(sc);
				break;
			}
			/* ACCUMULATE STATISTICS */
			switch (arg) {
			case DMC_NOBUFS:
				ifp->if_ierrors++;
				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
					goto report;
				break;
			case DMC_DISCONN:
				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
					goto report;
				break;
			case DMC_TIMEOUT:
				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
					goto report;
				break;
			case DMC_DATACK:
				ifp->if_oerrors++;
				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
					goto report;
				break;
			default:
				goto report;
			}
			break;
		report:
			snprintb(buf, sizeof(buf), CNTLO_BITS, arg);
			printd("%s: soft error, flags=%s\n",
			    device_xname(&sc->sc_dev), buf);
			if ((sc->sc_flag & DMC_RESTART) == 0) {
				/*
				 * kill off the dmc to get things
				 * going again by generating a
				 * procedure error
				 */
				sc->sc_flag |= DMC_RESTART;
				arg = sc->sc_ui.ui_baddr;
				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
			}
			break;

		default:
			printf("%s: bad control %o\n",
			    device_xname(&sc->sc_dev), cmd);
			break;
		}
	}
	dmcstart(ifp);
}

/*
 * DMC output routine.
 * Encapsulate a packet of type family for the dmc.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 */
static int
dmcoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
    struct rtentry *rt)
{
	int type, error, s;
	struct mbuf *m = m0;
	struct dmc_header *dh;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	if ((ifp->if_flags & IFF_UP) == 0) {
		error = ENETDOWN;
		goto bad;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		type = DMC_IPTYPE;
		break;
#endif

	case AF_UNSPEC:
		dh = (struct dmc_header *)dst->sa_data;
		type = dh->dmc_type;
		break;

	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Add local network header
	 * (there is space for a uba on a vax to step on)
	 */
	M_PREPEND(m, sizeof(struct dmc_header), M_DONTWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto bad;
	}
	dh = mtod(m, struct dmc_header *);
	dh->dmc_type = htons((u_short)type);

	/*
	 * Queue message on interface, and start output if interface
	 * not yet active.
	 */
	s = splnet();
	IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error) {
		/* mbuf is already freed */
		splx(s);
		return error;
	}
	dmcstart(ifp);
	splx(s);
	return 0;

bad:
	m_freem(m0);
	return error;
}

/*
 * Process an ioctl request.
 */
static int
dmcioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s = splnet(), error = 0;
	register struct dmc_softc *sc = ifp->if_softc;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			dmcinit(ifp);
		break;

	case SIOCSIFDSTADDR:
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			dmcinit(ifp);
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    sc->sc_flag & DMC_RUNNING)
			dmcdown(sc);
		else if (ifp->if_flags & IFF_UP &&
		    (sc->sc_flag & DMC_RUNNING) == 0)
			dmcrestart(sc);
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
	}
	splx(s);
	return error;
}

/*
 * Restart after a fatal error.
 * Clear device and reinitialize.
 */
static void
dmcrestart(struct dmc_softc *sc)
{
	int i;

#ifdef DMCDEBUG
	/* dump base table */
	printf("%s base table:\n", device_xname(&sc->sc_dev));
	for (i = 0; i < sizeof (struct dmc_base); i++)
		printf("%o\n", sc->dmc_base.d_base[i]);
#endif

	dmcdown(sc);

	/*
	 * Let the DMR finish the MCLR.  At 1 Mbit, it should do so
	 * in about a max of 6.4 milliseconds with diagnostics enabled.
	 */
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	/* Did the timer expire or did the DMR finish? */
	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		log(LOG_ERR, "%s: M820 Test Failed\n",
		    device_xname(&sc->sc_dev));
		return;
	}

	/* restart DMC */
	dmcinit(&sc->sc_if);
	sc->sc_flag &= ~DMC_RESTART;
	dmcstart(&sc->sc_if);
	sc->sc_if.if_collisions++;	/* why not? */
}

/*
 * Reset a device and mark down.
 * Flush output queue and drop queue limit.
 */
static void
dmcdown(struct dmc_softc *sc)
{
	struct ifxmt *ifxp;

	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	sc->sc_flag &= ~(DMC_RUNNING | DMC_ONLINE);

	for (ifxp = sc->sc_ifw; ifxp < &sc->sc_ifw[NXMT]; ifxp++) {
		if (ifxp->ifw_xtofree) {
			(void) m_freem(ifxp->ifw_xtofree);
			ifxp->ifw_xtofree = 0;
		}
	}
	IF_PURGE(&sc->sc_if.if_snd);
}

/*
 * Watchdog timeout to see that transmitted packets don't
 * lose interrupts.  The device has to be online (the first
 * transmission may block until the other side comes up).
 */
static void
dmctimeout(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	char buf1[64], buf2[64];

	if (sc->sc_flag & DMC_ONLINE) {
		snprintb(buf1, sizeof(buf1), DMC0BITS,
		    DMC_RBYTE(DMC_BSEL0) & 0xff);
		snprintb(buf2, sizeof(buf2), DMC2BITS,
		    DMC_RBYTE(DMC_BSEL2) & 0xff);
		log(LOG_ERR, "%s: output timeout, bsel0=%s bsel2=%s\n",
		    device_xname(&sc->sc_dev), buf1, buf2);
		dmcrestart(sc);
	}
}