/*	$NetBSD: xd.c,v 1.85 2009/05/12 13:22:28 cegger Exp $	*/

/*
 * Copyright (c) 1995 Charles D. Cranor
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * x d . c   x y l o g i c s   7 5 3 / 7 0 5 3   v m e / s m d   d r i v e r
 *
 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
 *
 * references: [1] Xylogics Model 753 User's Manual
 *                 part number: 166-753-001, Revision B, May 21, 1988.
 *                 "Your Partner For Performance"
 *             [2] other NetBSD disk device drivers
 *
 * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking
 * the time to answer some of my questions about the 753/7053.
 *
 * note: the 753 and the 7053 are programmed the same way, but are
 * different sizes.  the 753 is a 6U VME card, while the 7053 is a 9U
 * VME card (found in many VME based suns).
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xd.c,v 1.85 2009/05/12 13:22:28 cegger Exp $");

#undef XDC_DEBUG		/* full debug */
#define XDC_DIAG		/* extra sanity checks */
#if defined(DIAGNOSTIC) && !defined(XDC_DIAG)
#define XDC_DIAG		/* link in with master DIAG option */
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/syslog.h>
#include <sys/dkbad.h>
#include <sys/kauth.h>

#if defined(__sparc__) || defined(sun3)
#include <dev/sun/disklabel.h>
#endif

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/vme/xdreg.h>
#include <dev/vme/xdvar.h>
#include <dev/vme/xio.h>
/*
 * XDC_TWAIT: add iorq "N" to tail of SC's wait queue
 */
#define XDC_TWAIT(SC, N) { \
	(SC)->waitq[(SC)->waitend] = (N); \
	(SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \
	(SC)->nwait++; \
}

/*
 * XDC_HWAIT: add iorq "N" to head of SC's wait queue
 */
#define XDC_HWAIT(SC, N) { \
	(SC)->waithead = ((SC)->waithead == 0) ? \
		(XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \
	(SC)->waitq[(SC)->waithead] = (N); \
	(SC)->nwait++; \
}

/*
 * XDC_GET_WAITER: gets the first request waiting on the waitq
 * and removes it (so it can be submitted)
 */
#define XDC_GET_WAITER(XDCSC, RQ) { \
	(RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \
	(XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \
	(XDCSC)->nwait--; \
}

/*
 * XDC_FREE: add iorq "N" to SC's free list
 */
#define XDC_FREE(SC, N) { \
	(SC)->freereq[(SC)->nfree++] = (N); \
	(SC)->reqs[N].mode = 0; \
	if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \
}

/*
 * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0).
 */
#define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)]

/*
 * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC
 */
#define XDC_GO(XDC, ADDR) { \
	(XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \
	(ADDR) = ((ADDR) >> 8); \
	(XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \
	(ADDR) = ((ADDR) >> 8); \
	(XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \
	(ADDR) = ((ADDR) >> 8); \
	(XDC)->xdc_iopbaddr3 = (ADDR); \
	(XDC)->xdc_iopbamod = XDC_ADDRMOD; \
	(XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \
}

/*
 * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME".
 * LCV is a counter.  If it goes to zero then we timed out.
 */
#define XDC_WAIT(XDC, LCV, TIME, BITS) { \
	(LCV) = (TIME); \
	while ((LCV) > 0) { \
		if ((XDC)->xdc_csr & (BITS)) break; \
		(LCV) = (LCV) - 1; \
		DELAY(1); \
	} \
}

/*
 * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd)
 */
#define XDC_DONE(SC,RQ,ER) { \
	if ((RQ) == XD_ERR_FAIL) { \
		(ER) = (RQ); \
	} else { \
		if ((SC)->ndone-- == XDC_SUBWAITLIM) \
			wakeup(&(SC)->ndone); \
		(ER) = (SC)->reqs[RQ].errnum; \
		XDC_FREE((SC), (RQ)); \
	} \
}

/*
 * XDC_ADVANCE: advance iorq's pointers by a number of sectors
 */
#define XDC_ADVANCE(IORQ, N) { \
	if (N) { \
		(IORQ)->sectcnt -= (N); \
		(IORQ)->blockno += (N); \
		(IORQ)->dbuf += ((N)*XDFM_BPS); \
	} \
}
/*
 * note - addresses you can sleep on:
 *   [1] & of xd_softc's "state" (waiting for a chance to attach a drive)
 *   [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb)
 *   [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's
 *       to drop below XDC_SUBWAITLIM)
 *   [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish)
 */
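/*
 * For example, sleep address [2] pairs a tsleep() in xdc_cmd() with the
 * wakeup() hidden in the XDC_FREE() macro above (sketch only, abbreviated
 * from those two places in this file):
 *
 *	while (xdcsc->nfree == 0) {
 *		if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0))
 *			return (XD_ERR_FAIL);
 *	}
 *	...
 *	if ((SC)->nfree == 1) wakeup(&(SC)->nfree);	(in XDC_FREE)
 */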
/*
 * function prototypes
 * "xdc_*" functions are internal, all others are external interfaces
 */

extern int pil_to_vme[];	/* from obio.c */

int	xdc_cmd(struct xdc_softc *, int, int, int, int, int, char *, int);
const char *xdc_e2str(int);
int	xdc_error(struct xdc_softc *, struct xd_iorq *,
	    struct xd_iopb *, int, int);
int	xdc_ioctlcmd(struct xd_softc *, dev_t dev, struct xd_iocmd *);
void	xdc_perror(struct xd_iorq *, struct xd_iopb *, int);
int	xdc_piodriver(struct xdc_softc *, int, int);
int	xdc_remove_iorq(struct xdc_softc *);
int	xdc_reset(struct xdc_softc *, int, int, int, struct xd_softc *);
inline void xdc_rqinit(struct xd_iorq *, struct xdc_softc *,
	    struct xd_softc *, int, u_long, int,
	    void *, struct buf *);
void	xdc_rqtopb(struct xd_iorq *, struct xd_iopb *, int, int);
void	xdc_start(struct xdc_softc *, int);
int	xdc_startbuf(struct xdc_softc *, struct xd_softc *, struct buf *);
int	xdc_submit_iorq(struct xdc_softc *, int, int);
void	xdc_tick(void *);
void	xdc_xdreset(struct xdc_softc *, struct xd_softc *);
int	xd_dmamem_alloc(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
	    int *, bus_size_t, void **, bus_addr_t *);
void	xd_dmamem_free(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
	    int, bus_size_t, void *);

/* machine interrupt hook */

int	xdcmatch(device_t, cfdata_t, void *);
void	xdcattach(device_t, device_t, void *);
int	xdmatch(device_t, cfdata_t, void *);
void	xdattach(device_t, device_t, void *);
static int xdc_probe(void *, bus_space_tag_t, bus_space_handle_t);

static void xddummystrat(struct buf *);
int	xdgetdisklabel(struct xd_softc *, void *);

/* XXX - think about this more.. xd_machdep? */
void	xdc_md_setup(void);
#if defined(__sparc__)
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
void xdc_md_setup(void)
{
	if (CPU_ISSUN4 && cpuinfo.cpu_type == CPUTYP_4_300)
		XDC_DELAY = XDC_DELAY_4_300;
	else
		XDC_DELAY = XDC_DELAY_SPARC;
}
#elif defined(sun3)
void xdc_md_setup(void)
{
	XDC_DELAY = XDC_DELAY_SUN3;
}
#else
void xdc_md_setup(void)
{
}
#endif
/*
 * cfattach's: device driver interface to autoconfig
 */

CFATTACH_DECL(xdc, sizeof(struct xdc_softc),
    xdcmatch, xdcattach, NULL, NULL);

CFATTACH_DECL(xd, sizeof(struct xd_softc),
    xdmatch, xdattach, NULL, NULL);

extern struct cfdriver xd_cd;

dev_type_open(xdopen);
dev_type_close(xdclose);
dev_type_read(xdread);
dev_type_write(xdwrite);
dev_type_ioctl(xdioctl);
dev_type_strategy(xdstrategy);
dev_type_dump(xddump);
dev_type_size(xdsize);

const struct bdevsw xd_bdevsw = {
	xdopen, xdclose, xdstrategy, xdioctl, xddump, xdsize, D_DISK
};

const struct cdevsw xd_cdevsw = {
	xdopen, xdclose, xdread, xdwrite, xdioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

struct xdc_attach_args {	/* this is the "aux" args to xdattach */
	int	driveno;	/* unit number */
	int	fullmode;	/* submit mode */
	int	booting;	/* are we booting or not? */
};

struct dkdriver xddkdriver = {xdstrategy};
/*
 * start: disk label fix code (XXX)
 */

static void *xd_labeldata;

static void
xddummystrat(struct buf *bp)
{
	if (bp->b_bcount != XDFM_BPS)
		panic("xddummystrat");
	memcpy(bp->b_data, xd_labeldata, XDFM_BPS);
	bp->b_oflags |= BO_DONE;
	bp->b_cflags &= ~BC_BUSY;
}

int
xdgetdisklabel(struct xd_softc *xd, void *b)
{
	const char *err;
#if defined(__sparc__) || defined(sun3)
	struct sun_disklabel *sdl;
#endif

	/* We already have the label data in `b'; setup for dummy strategy */
	xd_labeldata = b;

	/* Required parameter for readdisklabel() */
	xd->sc_dk.dk_label->d_secsize = XDFM_BPS;

	err = readdisklabel(MAKEDISKDEV(0, device_unit(&xd->sc_dev), RAW_PART),
	    xddummystrat,
	    xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel);
	if (err) {
		aprint_error_dev(&xd->sc_dev, "%s\n", err);
		return (XD_ERR_FAIL);
	}

#if defined(__sparc__) || defined(sun3)
	/* Ok, we have the label; fill in `pcyl' if there's SunOS magic */
	sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block;
	if (sdl->sl_magic == SUN_DKMAGIC) {
		xd->pcyl = sdl->sl_pcylinders;
	} else
#endif
	{
		printf("%s: WARNING: no `pcyl' in disk label.\n",
			device_xname(&xd->sc_dev));
		xd->pcyl = xd->sc_dk.dk_label->d_ncylinders +
			xd->sc_dk.dk_label->d_acylinders;
		printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n",
			device_xname(&xd->sc_dev), xd->pcyl);
	}

	xd->ncyl = xd->sc_dk.dk_label->d_ncylinders;
	xd->acyl = xd->sc_dk.dk_label->d_acylinders;
	xd->nhead = xd->sc_dk.dk_label->d_ntracks;
	xd->nsect = xd->sc_dk.dk_label->d_nsectors;
	xd->sectpercyl = xd->nhead * xd->nsect;
	xd->sc_dk.dk_label->d_secsize = XDFM_BPS;	/* not handled by
							 * readdisklabel */
	return (XD_ERR_AOK);
}

/*
 * end: disk label fix code (XXX)
 */
/*
 * Shorthand for allocating, mapping and loading a DMA buffer
 */
int
xd_dmamem_alloc(bus_dma_tag_t tag, bus_dmamap_t map, bus_dma_segment_t *seg,
    int *nsegp, bus_size_t len, void **kvap, bus_addr_t *dmap)
{
	int nseg, error;

	if ((error = bus_dmamem_alloc(tag, len, 0, 0,
	    seg, 1, &nseg, BUS_DMA_NOWAIT)) != 0) {
		return (error);
	}

	if ((error = bus_dmamem_map(tag, seg, nseg,
	    len, kvap,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		bus_dmamem_free(tag, seg, nseg);
		return (error);
	}

	if ((error = bus_dmamap_load(tag, map,
	    *kvap, len, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		bus_dmamem_unmap(tag, *kvap, len);
		bus_dmamem_free(tag, seg, nseg);
		return (error);
	}

	*dmap = map->dm_segs[0].ds_addr;
	*nsegp = nseg;
	return (0);
}

void
xd_dmamem_free(bus_dma_tag_t tag, bus_dmamap_t map, bus_dma_segment_t *seg,
    int nseg, bus_size_t len, void *kva)
{

	bus_dmamap_unload(tag, map);
	bus_dmamem_unmap(tag, kva, len);
	bus_dmamem_free(tag, seg, nseg);
}
/*
 * a u t o c o n f i g   f u n c t i o n s
 */

/*
 * xdcmatch: determine if xdc is present or not.  we do a
 * soft reset to detect the xdc.
 */
static int
xdc_probe(void *arg, bus_space_tag_t tag, bus_space_handle_t handle)
{
	struct xdc *xdc = (void *)handle; /* XXX */
	int del = 0;

	xdc->xdc_csr = XDC_RESET;
	XDC_WAIT(xdc, del, XDC_RESETUSEC, XDC_RESET);
	return (del > 0 ? 0 : EIO);
}

int xdcmatch(parent, cf, aux)
	device_t parent;
	cfdata_t cf;
	void *aux;
{
	struct vme_attach_args *va = aux;
	vme_chipset_tag_t ct = va->va_vct;
	vme_am_t mod;
	int error;

	mod = VME_AM_A16 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
	if (vme_space_alloc(ct, va->r[0].offset, sizeof(struct xdc), mod))
		return (0);

	error = vme_probe(ct, va->r[0].offset, sizeof(struct xdc),
	    mod, VME_D32, xdc_probe, 0);
	vme_space_free(va->va_vct, va->r[0].offset, sizeof(struct xdc), mod);

	return (error == 0);
}
475 * xdcattach: attach controller
478 xdcattach(device_t parent
, device_t self
, void *aux
)
480 struct vme_attach_args
*va
= aux
;
481 vme_chipset_tag_t ct
= va
->va_vct
;
483 bus_space_handle_t bh
;
484 vme_intr_handle_t ih
;
486 struct xdc_softc
*xdc
= device_private(self
);
487 struct xdc_attach_args xa
;
488 int lcv
, rqno
, error
;
489 struct xd_iopb_ctrl
*ctl
;
490 bus_dma_segment_t seg
;
497 /* get addressing and intr level stuff from autoconfig and load it
498 * into our xdc_softc. */
500 xdc
->dmatag
= va
->va_bdt
;
501 mod
= VME_AM_A16
| VME_AM_MBO
| VME_AM_SUPER
| VME_AM_DATA
;
503 if (vme_space_alloc(ct
, va
->r
[0].offset
, sizeof(struct xdc
), mod
))
504 panic("xdc: vme alloc");
506 if (vme_space_map(ct
, va
->r
[0].offset
, sizeof(struct xdc
),
507 mod
, VME_D32
, 0, &bt
, &bh
, &resc
) != 0)
508 panic("xdc: vme_map");
510 xdc
->xdc
= (struct xdc
*) bh
; /* XXX */
511 xdc
->ipl
= va
->ilevel
;
512 xdc
->vector
= va
->ivector
;
514 for (lcv
= 0; lcv
< XDC_MAXDEV
; lcv
++)
515 xdc
->sc_drives
[lcv
] = (struct xd_softc
*) 0;
518 * allocate and zero buffers
520 * note: we simplify the code by allocating the max number of iopbs and
521 * iorq's up front. thus, we avoid linked lists and the costs
522 * associated with them in exchange for wasting a little memory.
525 /* Get DMA handle for misc. transfers */
526 if ((error
= vme_dmamap_create(
527 ct
, /* VME chip tag */
529 VME_AM_A24
, /* address modifier */
530 VME_D32
, /* data size */
533 MAXPHYS
, /* maxsegsz */
536 &xdc
->auxmap
)) != 0) {
538 aprint_error_dev(&xdc
->sc_dev
, "DMA buffer map create error %d\n",
544 /* Get DMA handle for mapping iorq descriptors */
545 if ((error
= vme_dmamap_create(
546 ct
, /* VME chip tag */
547 XDC_MAXIOPB
* sizeof(struct xd_iopb
),
548 VME_AM_A24
, /* address modifier */
549 VME_D32
, /* data size */
552 XDC_MAXIOPB
* sizeof(struct xd_iopb
),
555 &xdc
->iopmap
)) != 0) {
557 aprint_error_dev(&xdc
->sc_dev
, "DMA buffer map create error %d\n",
562 /* Get DMA buffer for iorq descriptors */
563 if ((error
= xd_dmamem_alloc(xdc
->dmatag
, xdc
->iopmap
, &seg
, &rseg
,
564 XDC_MAXIOPB
* sizeof(struct xd_iopb
),
565 (void **)&xdc
->iopbase
,
567 aprint_error_dev(&xdc
->sc_dev
, "DMA buffer alloc error %d\n",
571 xdc
->dvmaiopb
= (struct xd_iopb
*)(u_long
)BUS_ADDR_PADDR(busaddr
);
573 memset(xdc
->iopbase
, 0, XDC_MAXIOPB
* sizeof(struct xd_iopb
));
575 xdc
->reqs
= (struct xd_iorq
*)
576 malloc(XDC_MAXIOPB
* sizeof(struct xd_iorq
),
577 M_DEVBUF
, M_NOWAIT
|M_ZERO
);
578 if (xdc
->reqs
== NULL
)
581 /* init free list, iorq to iopb pointers, and non-zero fields in the
582 * iopb which never change. */
584 for (lcv
= 0; lcv
< XDC_MAXIOPB
; lcv
++) {
585 xdc
->reqs
[lcv
].iopb
= &xdc
->iopbase
[lcv
];
586 xdc
->reqs
[lcv
].dmaiopb
= &xdc
->dvmaiopb
[lcv
];
587 xdc
->freereq
[lcv
] = lcv
;
588 xdc
->iopbase
[lcv
].fixd
= 1; /* always the same */
589 xdc
->iopbase
[lcv
].naddrmod
= XDC_ADDRMOD
; /* always the same */
590 xdc
->iopbase
[lcv
].intr_vec
= xdc
->vector
; /* always the same */
592 if ((error
= vme_dmamap_create(
593 ct
, /* VME chip tag */
595 VME_AM_A24
, /* address modifier */
596 VME_D32
, /* data size */
599 MAXPHYS
, /* maxsegsz */
602 &xdc
->reqs
[lcv
].dmamap
)) != 0) {
604 aprint_error_dev(&xdc
->sc_dev
, "DMA buffer map create error %d\n",
609 xdc
->nfree
= XDC_MAXIOPB
;
611 xdc
->waithead
= xdc
->waitend
= xdc
->nwait
= 0;
614 /* init queue of waiting bufs */
616 bufq_alloc(&xdc
->sc_wq
, "fcfs", 0);
617 callout_init(&xdc
->sc_tick_ch
, 0);
620 * section 7 of the manual tells us how to init the controller:
621 * - read controller parameters (6/0)
622 * - write controller parameters (5/0)
625 /* read controller parameters and insure we have a 753/7053 */
627 rqno
= xdc_cmd(xdc
, XDCMD_RDP
, XDFUN_CTL
, 0, 0, 0, 0, XD_SUB_POLL
);
628 if (rqno
== XD_ERR_FAIL
) {
629 printf(": couldn't read controller params\n");
630 return; /* shouldn't ever happen */
632 ctl
= (struct xd_iopb_ctrl
*) &xdc
->iopbase
[rqno
];
633 if (ctl
->ctype
!= XDCT_753
) {
634 if (xdc
->reqs
[rqno
].errnum
)
635 printf(": %s: ", xdc_e2str(xdc
->reqs
[rqno
].errnum
));
636 printf(": doesn't identify as a 753/7053\n");
637 XDC_DONE(xdc
, rqno
, error
);
640 printf(": Xylogics 753/7053, PROM=0x%x.%02x.%02x\n",
641 ctl
->eprom_partno
, ctl
->eprom_lvl
, ctl
->eprom_rev
);
642 XDC_DONE(xdc
, rqno
, error
);
644 /* now write controller parameters (xdc_cmd sets all params for us) */
646 rqno
= xdc_cmd(xdc
, XDCMD_WRP
, XDFUN_CTL
, 0, 0, 0, 0, XD_SUB_POLL
);
647 XDC_DONE(xdc
, rqno
, error
);
649 aprint_error_dev(&xdc
->sc_dev
, "controller config error: %s\n",
654 /* link in interrupt with higher level software */
655 vme_intr_map(ct
, va
->ilevel
, va
->ivector
, &ih
);
656 vme_intr_establish(ct
, ih
, IPL_BIO
, xdcintr
, xdc
);
657 evcnt_attach_dynamic(&xdc
->sc_intrcnt
, EVCNT_TYPE_INTR
, NULL
,
658 device_xname(&xdc
->sc_dev
), "intr");
661 /* now we must look for disks using autoconfig */
662 xa
.fullmode
= XD_SUB_POLL
;
665 for (xa
.driveno
= 0; xa
.driveno
< XDC_MAXDEV
; xa
.driveno
++)
666 (void) config_found(self
, (void *) &xa
, NULL
);
668 /* start the watchdog clock */
669 callout_reset(&xdc
->sc_tick_ch
, XDC_TICKCNT
, xdc_tick
, xdc
);
674 * xdmatch: probe for disk.
676 * note: we almost always say disk is present. this allows us to
677 * spin up and configure a disk after the system is booted (we can
681 xdmatch(device_t parent
, cfdata_t cf
, void *aux
)
683 struct xdc_attach_args
*xa
= aux
;
685 /* looking for autoconf wildcard or exact match */
687 if (cf
->cf_loc
[XDCCF_DRIVE
] != XDCCF_DRIVE_DEFAULT
&&
688 cf
->cf_loc
[XDCCF_DRIVE
] != xa
->driveno
)
696 * xdattach: attach a disk. this can be called from autoconf and also
697 * from xdopen/xdstrategy.
700 xdattach(device_t parent
, device_t self
, void *aux
)
702 struct xd_softc
*xd
= device_private(self
);
703 struct xdc_softc
*xdc
= device_private(parent
);
704 struct xdc_attach_args
*xa
= aux
;
705 int rqno
, spt
= 0, mb
, blk
, lcv
, fmode
, s
= 0, newstate
;
706 struct xd_iopb_drive
*driopb
;
709 bus_dma_segment_t seg
;
715 * Always re-initialize the disk structure. We want statistics
716 * to start with a clean slate.
718 memset(&xd
->sc_dk
, 0, sizeof(xd
->sc_dk
));
720 /* if booting, init the xd_softc */
723 xd
->state
= XD_DRIVE_UNKNOWN
; /* to start */
727 xd
->xd_drive
= xa
->driveno
;
728 fmode
= xa
->fullmode
;
729 xdc
->sc_drives
[xa
->driveno
] = xd
;
731 /* if not booting, make sure we are the only process in the attach for
732 * this drive. if locked out, sleep on it. */
736 while (xd
->state
== XD_DRIVE_ATTACHING
) {
737 if (tsleep(&xd
->state
, PRIBIO
, "xdattach", 0)) {
743 device_xname(&xd
->sc_dev
), device_xname(&xd
->parent
->sc_dev
));
746 /* we now have control */
747 xd
->state
= XD_DRIVE_ATTACHING
;
748 newstate
= XD_DRIVE_UNKNOWN
;
751 if ((error
= xd_dmamem_alloc(xdc
->dmatag
, xdc
->auxmap
, &seg
, &rseg
,
755 aprint_error_dev(&xdc
->sc_dev
, "DMA buffer alloc error %d\n",
759 dmaddr
= (void *)(u_long
)BUS_ADDR_PADDR(busaddr
);
761 /* first try and reset the drive */
763 rqno
= xdc_cmd(xdc
, XDCMD_RST
, 0, xd
->xd_drive
, 0, 0, 0, fmode
);
764 XDC_DONE(xdc
, rqno
, error
);
765 if (error
== XD_ERR_NRDY
) {
766 printf(" drive %d: off-line\n", xa
->driveno
);
770 printf(": ERROR 0x%02x (%s)\n", error
, xdc_e2str(error
));
773 printf(" drive %d: ready\n", xa
->driveno
);
775 /* now set format parameters */
777 rqno
= xdc_cmd(xdc
, XDCMD_WRP
, XDFUN_FMT
, xd
->xd_drive
, 0, 0, 0, fmode
);
778 XDC_DONE(xdc
, rqno
, error
);
780 aprint_error_dev(&xd
->sc_dev
, "write format parameters failed: %s\n",
785 /* get drive parameters */
786 rqno
= xdc_cmd(xdc
, XDCMD_RDP
, XDFUN_DRV
, xd
->xd_drive
, 0, 0, 0, fmode
);
787 if (rqno
!= XD_ERR_FAIL
) {
788 driopb
= (struct xd_iopb_drive
*) &xdc
->iopbase
[rqno
];
789 spt
= driopb
->sectpertrk
;
791 XDC_DONE(xdc
, rqno
, error
);
793 aprint_error_dev(&xd
->sc_dev
, "read drive parameters failed: %s\n",
799 * now set drive parameters (to semi-bogus values) so we can read the
802 xd
->pcyl
= xd
->ncyl
= 1;
807 for (lcv
= 0; lcv
< 126; lcv
++) /* init empty bad144 table */
808 xd
->dkb
.bt_bad
[lcv
].bt_cyl
= xd
->dkb
.bt_bad
[lcv
].bt_trksec
= 0xffff;
809 rqno
= xdc_cmd(xdc
, XDCMD_WRP
, XDFUN_DRV
, xd
->xd_drive
, 0, 0, 0, fmode
);
810 XDC_DONE(xdc
, rqno
, error
);
812 aprint_error_dev(&xd
->sc_dev
, "write drive parameters failed: %s\n",
817 /* read disk label */
818 rqno
= xdc_cmd(xdc
, XDCMD_RD
, 0, xd
->xd_drive
, 0, 1, dmaddr
, fmode
);
819 XDC_DONE(xdc
, rqno
, error
);
821 aprint_error_dev(&xd
->sc_dev
, "reading disk label failed: %s\n",
825 newstate
= XD_DRIVE_NOLABEL
;
828 /* Attach the disk: must be before getdisklabel to malloc label */
829 disk_init(&xd
->sc_dk
, device_xname(&xd
->sc_dev
), &xddkdriver
);
830 disk_attach(&xd
->sc_dk
);
832 if (xdgetdisklabel(xd
, buf
) != XD_ERR_AOK
)
835 /* inform the user of what is up */
836 printf("%s: <%s>, pcyl %d, hw_spt %d\n", device_xname(&xd
->sc_dev
),
838 mb
= xd
->ncyl
* (xd
->nhead
* xd
->nsect
) / (1048576 / XDFM_BPS
);
839 printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n",
840 device_xname(&xd
->sc_dev
), mb
, xd
->ncyl
, xd
->nhead
, xd
->nsect
,
843 /* now set the real drive parameters! */
845 rqno
= xdc_cmd(xdc
, XDCMD_WRP
, XDFUN_DRV
, xd
->xd_drive
, 0, 0, 0, fmode
);
846 XDC_DONE(xdc
, rqno
, error
);
848 aprint_error_dev(&xd
->sc_dev
, "write real drive parameters failed: %s\n",
852 newstate
= XD_DRIVE_ONLINE
;
855 * read bad144 table. this table resides on the first sector of the
856 * last track of the disk (i.e. second cyl of "acyl" area).
859 blk
= (xd
->ncyl
+ xd
->acyl
- 1) * (xd
->nhead
* xd
->nsect
) + /* last cyl */
860 (xd
->nhead
- 1) * xd
->nsect
; /* last head */
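	/*
	 * Worked example (hypothetical geometry, for illustration only):
	 * with ncyl = 840, acyl = 2, nhead = 15 and nsect = 67 the table
	 * would be read from
	 *	blk = (840 + 2 - 1) * (15 * 67) + (15 - 1) * 67
	 *	    = 845205 + 938 = 846143
	 * i.e. the first sector of the last track of the last alternate
	 * cylinder.
	 */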
861 rqno
= xdc_cmd(xdc
, XDCMD_RD
, 0, xd
->xd_drive
, blk
, 1, dmaddr
, fmode
);
862 XDC_DONE(xdc
, rqno
, error
);
864 aprint_error_dev(&xd
->sc_dev
, "reading bad144 failed: %s\n",
869 /* check dkbad for sanity */
870 dkb
= (struct dkbad
*) buf
;
871 for (lcv
= 0; lcv
< 126; lcv
++) {
872 if ((dkb
->bt_bad
[lcv
].bt_cyl
== 0xffff ||
873 dkb
->bt_bad
[lcv
].bt_cyl
== 0) &&
874 dkb
->bt_bad
[lcv
].bt_trksec
== 0xffff)
875 continue; /* blank */
876 if (dkb
->bt_bad
[lcv
].bt_cyl
>= xd
->ncyl
)
878 if ((dkb
->bt_bad
[lcv
].bt_trksec
>> 8) >= xd
->nhead
)
880 if ((dkb
->bt_bad
[lcv
].bt_trksec
& 0xff) >= xd
->nsect
)
884 aprint_error_dev(&xd
->sc_dev
, "warning: invalid bad144 sector!\n");
886 memcpy(&xd
->dkb
, buf
, XDFM_BPS
);
891 xd_dmamem_free(xdc
->dmatag
, xdc
->auxmap
,
892 &seg
, rseg
, XDFM_BPS
, buf
);
895 xd
->state
= newstate
;
903 * end of autoconfig functions
907 * { b , c } d e v s w f u n c t i o n s
911 * xdclose: close device
914 xdclose(dev_t dev
, int flag
, int fmt
, struct lwp
*l
)
916 struct xd_softc
*xd
= device_lookup_private(&xd_cd
, DISKUNIT(dev
));
917 int part
= DISKPART(dev
);
919 /* clear mask bits */
923 xd
->sc_dk
.dk_copenmask
&= ~(1 << part
);
926 xd
->sc_dk
.dk_bopenmask
&= ~(1 << part
);
929 xd
->sc_dk
.dk_openmask
= xd
->sc_dk
.dk_copenmask
| xd
->sc_dk
.dk_bopenmask
;
935 * xddump: crash dump system
938 xddump(dev_t dev
, daddr_t blkno
, void *va
, size_t size
)
943 unit
= DISKUNIT(dev
);
944 part
= DISKPART(dev
);
946 xd
= device_lookup_private(&xd_cd
, unit
);
950 printf("%s%c: crash dump not supported (yet)\n", device_xname(&xd
->sc_dev
),
955 /* outline: globals: "dumplo" == sector number of partition to start
956 * dump at (convert to physical sector with partition table)
957 * "dumpsize" == size of dump in clicks "physmem" == size of physical
958 * memory (clicks, ctob() to get bytes) (normal case: dumpsize ==
961 * dump a copy of physical memory to the dump device starting at sector
962 * "dumplo" in the swap partition (make sure > 0). map in pages as
963 * we go. use polled I/O.
965 * XXX how to handle NON_CONTIG? */
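#if 0
	/*
	 * Untested sketch of the outline above (not the driver's method;
	 * "maddr", "db" and "sz" are local names invented here).  It leans
	 * on xdc_cmd() in XD_SUB_POLL mode and on XDC_DONE(), both defined
	 * in this file.  A real implementation would also have to offset
	 * "blkno" by the dump partition start (dumplo), bound-check against
	 * dumpsize, and hand xdc_cmd() a DVMA-mapped address rather than
	 * the raw kernel VA used below.
	 */
	{
		char *maddr = va;
		daddr_t db = blkno;
		size_t sz;
		int rqno, err;

		while (size > 0) {
			sz = (size > MAXPHYS) ? MAXPHYS : size;
			rqno = xdc_cmd(xd->parent, XDCMD_WR, 0, xd->xd_drive,
			    db, sz / XDFM_BPS, maddr, XD_SUB_POLL);
			if (rqno == XD_ERR_FAIL)
				return (EIO);
			XDC_DONE(xd->parent, rqno, err);
			if (err != XD_ERR_AOK)
				return (EIO);
			maddr += sz;
			db += sz / XDFM_BPS;
			size -= sz;
		}
		return (0);
	}
#endif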
969 static enum kauth_device_req
970 xd_getkauthreq(u_char cmd
)
972 enum kauth_device_req req
;
977 req
= KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITE
;
981 req
= KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READ
;
986 req
= KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READCONF
;
991 req
= KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITECONF
;
1006 * xdioctl: ioctls on XD drives. based on ioctl's of other netbsd disks.
1009 xdioctl(dev
, command
, addr
, flag
, l
)
1017 struct xd_softc
*xd
;
1018 struct xd_iocmd
*xio
;
1020 #ifdef __HAVE_OLD_DISKLABEL
1021 struct disklabel newlabel
;
1023 struct disklabel
*lp
;
1025 unit
= DISKUNIT(dev
);
1027 if ((xd
= device_lookup_private(&xd_cd
, unit
)) == NULL
)
1030 /* switch on ioctl type */
1033 case DIOCSBAD
: /* set bad144 info */
1034 if ((flag
& FWRITE
) == 0)
1037 memcpy(&xd
->dkb
, addr
, sizeof(xd
->dkb
));
1041 case DIOCGDINFO
: /* get disk label */
1042 memcpy(addr
, xd
->sc_dk
.dk_label
, sizeof(struct disklabel
));
1044 #ifdef __HAVE_OLD_DISKLABEL
1046 newlabel
= *(xd
->sc_dk
.dk_label
);
1047 if (newlabel
.d_npartitions
> OLDMAXPARTITIONS
)
1049 memcpy(addr
, &newlabel
, sizeof (struct olddisklabel
));
1053 case DIOCGPART
: /* get partition info */
1054 ((struct partinfo
*) addr
)->disklab
= xd
->sc_dk
.dk_label
;
1055 ((struct partinfo
*) addr
)->part
=
1056 &xd
->sc_dk
.dk_label
->d_partitions
[DISKPART(dev
)];
1059 case DIOCSDINFO
: /* set disk label */
1060 #ifdef __HAVE_OLD_DISKLABEL
1062 if (command
== ODIOCSDINFO
) {
1063 memset(&newlabel
, 0, sizeof newlabel
);
1064 memcpy(&newlabel
, addr
, sizeof (struct olddisklabel
));
1068 lp
= (struct disklabel
*)addr
;
1070 if ((flag
& FWRITE
) == 0)
1072 error
= setdisklabel(xd
->sc_dk
.dk_label
,
1073 lp
, /* xd->sc_dk.dk_openmask : */ 0,
1074 xd
->sc_dk
.dk_cpulabel
);
1076 if (xd
->state
== XD_DRIVE_NOLABEL
)
1077 xd
->state
= XD_DRIVE_ONLINE
;
1081 case DIOCWLABEL
: /* change write status of disk label */
1082 if ((flag
& FWRITE
) == 0)
1085 xd
->flags
|= XD_WLABEL
;
1087 xd
->flags
&= ~XD_WLABEL
;
1090 case DIOCWDINFO
: /* write disk label */
1091 #ifdef __HAVE_OLD_DISKLABEL
1093 if (command
== ODIOCWDINFO
) {
1094 memset(&newlabel
, 0, sizeof newlabel
);
1095 memcpy(&newlabel
, addr
, sizeof (struct olddisklabel
));
1099 lp
= (struct disklabel
*)addr
;
1101 if ((flag
& FWRITE
) == 0)
1103 error
= setdisklabel(xd
->sc_dk
.dk_label
,
1104 lp
, /* xd->sc_dk.dk_openmask : */ 0,
1105 xd
->sc_dk
.dk_cpulabel
);
1107 if (xd
->state
== XD_DRIVE_NOLABEL
)
1108 xd
->state
= XD_DRIVE_ONLINE
;
1110 /* Simulate opening partition 0 so write succeeds. */
1111 xd
->sc_dk
.dk_openmask
|= (1 << 0);
1112 error
= writedisklabel(MAKEDISKDEV(major(dev
),
1113 DISKUNIT(dev
), RAW_PART
),
1114 xdstrategy
, xd
->sc_dk
.dk_label
,
1115 xd
->sc_dk
.dk_cpulabel
);
1116 xd
->sc_dk
.dk_openmask
=
1117 xd
->sc_dk
.dk_copenmask
| xd
->sc_dk
.dk_bopenmask
;
1122 enum kauth_device_req req
;
1124 xio
= (struct xd_iocmd
*) addr
;
1125 req
= xd_getkauthreq(xio
->cmd
);
1126 if ((error
= kauth_authorize_device_passthru(l
->l_cred
,
1127 dev
, req
, xio
)) != 0)
1129 return (xdc_ioctlcmd(xd
, dev
, xio
));
1137 * xdopen: open drive
1141 xdopen(dev_t dev
, int flag
, int fmt
, struct lwp
*l
)
1144 struct xd_softc
*xd
;
1145 struct xdc_attach_args xa
;
1147 /* first, could it be a valid target? */
1149 unit
= DISKUNIT(dev
);
1150 if ((xd
= device_lookup_private(&xd_cd
, unit
)) == NULL
)
1152 part
= DISKPART(dev
);
1154 /* do we need to attach the drive? */
1156 if (xd
->state
== XD_DRIVE_UNKNOWN
) {
1157 xa
.driveno
= xd
->xd_drive
;
1158 xa
.fullmode
= XD_SUB_WAIT
;
1160 xdattach((device_t
) xd
->parent
, (device_t
) xd
, &xa
);
1161 if (xd
->state
== XD_DRIVE_UNKNOWN
) {
1165 /* check for partition */
1167 if (part
!= RAW_PART
&&
1168 (part
>= xd
->sc_dk
.dk_label
->d_npartitions
||
1169 xd
->sc_dk
.dk_label
->d_partitions
[part
].p_fstype
== FS_UNUSED
)) {
1172 /* set open masks */
1176 xd
->sc_dk
.dk_copenmask
|= (1 << part
);
1179 xd
->sc_dk
.dk_bopenmask
|= (1 << part
);
1182 xd
->sc_dk
.dk_openmask
= xd
->sc_dk
.dk_copenmask
| xd
->sc_dk
.dk_bopenmask
;
1188 xdread(dev_t dev
, struct uio
*uio
, int flags
)
1191 return (physio(xdstrategy
, NULL
, dev
, B_READ
, minphys
, uio
));
1195 xdwrite(dev_t dev
, struct uio
*uio
, int flags
)
1198 return (physio(xdstrategy
, NULL
, dev
, B_WRITE
, minphys
, uio
));
1203 * xdsize: return size of a partition for a dump
1211 struct xd_softc
*xdsc
;
1212 int unit
, part
, size
, omask
;
1215 unit
= DISKUNIT(dev
);
1216 if ((xdsc
= device_lookup_private(&xd_cd
, unit
)) == NULL
)
1219 part
= DISKPART(dev
);
1220 omask
= xdsc
->sc_dk
.dk_openmask
& (1 << part
);
1222 if (omask
== 0 && xdopen(dev
, 0, S_IFBLK
, NULL
) != 0)
1226 if (xdsc
->sc_dk
.dk_label
->d_partitions
[part
].p_fstype
!= FS_SWAP
)
1227 size
= -1; /* only give valid size for swap partitions */
1229 size
= xdsc
->sc_dk
.dk_label
->d_partitions
[part
].p_size
*
1230 (xdsc
->sc_dk
.dk_label
->d_secsize
/ DEV_BSIZE
);
1231 if (omask
== 0 && xdclose(dev
, 0, S_IFBLK
, NULL
) != 0)
1236 * xdstrategy: buffering system interface to xd.
1244 struct xd_softc
*xd
;
1245 struct xdc_softc
*parent
;
1247 struct xdc_attach_args xa
;
1249 unit
= DISKUNIT(bp
->b_dev
);
1251 /* check for live device */
1253 if (!(xd
= device_lookup_private(&xd_cd
, unit
)) ||
1255 (bp
->b_bcount
% xd
->sc_dk
.dk_label
->d_secsize
) != 0) {
1256 bp
->b_error
= EINVAL
;
1259 /* do we need to attach the drive? */
1261 if (xd
->state
== XD_DRIVE_UNKNOWN
) {
1262 xa
.driveno
= xd
->xd_drive
;
1263 xa
.fullmode
= XD_SUB_WAIT
;
1265 xdattach((device_t
)xd
->parent
, (device_t
)xd
, &xa
);
1266 if (xd
->state
== XD_DRIVE_UNKNOWN
) {
1271 if (xd
->state
!= XD_DRIVE_ONLINE
&& DISKPART(bp
->b_dev
) != RAW_PART
) {
1272 /* no I/O to unlabeled disks, unless raw partition */
1276 /* short circuit zero length request */
1278 if (bp
->b_bcount
== 0)
1281 /* check bounds with label (disksubr.c). Determine the size of the
1282 * transfer, and make sure it is within the boundaries of the
1283 * partition. Adjust transfer if needed, and signal errors or early
1286 if (bounds_check_with_label(&xd
->sc_dk
, bp
,
1287 (xd
->flags
& XD_WLABEL
) != 0) <= 0)
1291 * now we know we have a valid buf structure that we need to do I/O
1294 * note that we don't disksort because the controller has a sorting
1295 * algorithm built into the hardware.
1298 s
= splbio(); /* protect the queues */
1300 /* first, give jobs in front of us a chance */
1301 parent
= xd
->parent
;
1302 while (parent
->nfree
> 0 && bufq_peek(parent
->sc_wq
) != NULL
)
1303 if (xdc_startbuf(parent
, NULL
, NULL
) != XD_ERR_AOK
)
1306 /* if there are no free iorq's, then we just queue and return. the
1307 * buffs will get picked up later by xdcintr().
1310 if (parent
->nfree
== 0) {
1311 bufq_put(parent
->sc_wq
, bp
);
1316 /* now we have free iopb's and we are at splbio... start 'em up */
1317 if (xdc_startbuf(parent
, xd
, bp
) != XD_ERR_AOK
) {
1326 done
: /* tells upper layers we are done with this
1328 bp
->b_resid
= bp
->b_bcount
;
1332 * end of {b,c}devsw functions
1336 * i n t e r r u p t f u n c t i o n
1338 * xdcintr: hardware interrupt.
1345 struct xdc_softc
*xdcsc
= v
;
1347 /* kick the event counter */
1349 xdcsc
->sc_intrcnt
.ev_count
++;
1351 /* remove as many done IOPBs as possible */
1353 xdc_remove_iorq(xdcsc
);
1355 /* start any iorq's already waiting */
1357 xdc_start(xdcsc
, XDC_MAXIOPB
);
1359 /* fill up any remaining iorq's with queue'd buffers */
1361 while (xdcsc
->nfree
> 0 && bufq_peek(xdcsc
->sc_wq
) != NULL
)
1362 if (xdc_startbuf(xdcsc
, NULL
, NULL
) != XD_ERR_AOK
)
1368 * end of interrupt function
1372 * i n t e r n a l f u n c t i o n s
1376 * xdc_rqinit: fill out the fields of an I/O request
1380 xdc_rqinit(struct xd_iorq
*rq
, struct xdc_softc
*xdc
, struct xd_softc
*xd
, int md
, u_long blk
, int cnt
, void *db
, struct buf
*bp
)
1384 rq
->ttl
= XDC_MAXTTL
+ 10;
1386 rq
->tries
= rq
->errnum
= rq
->lasterror
= 0;
1393 * xdc_rqtopb: load up an IOPB based on an iorq
1397 xdc_rqtopb(iorq
, iopb
, cmd
, subfun
)
1398 struct xd_iorq
*iorq
;
1399 struct xd_iopb
*iopb
;
1405 /* standard stuff */
1407 iopb
->errs
= iopb
->done
= 0;
1409 iopb
->errnum
= iopb
->status
= 0;
1410 iopb
->subfun
= subfun
;
1412 iopb
->unit
= iorq
->xd
->xd_drive
;
1416 /* check for alternate IOPB format */
1418 if (cmd
== XDCMD_WRP
) {
1421 struct xd_iopb_ctrl
*ctrl
=
1422 (struct xd_iopb_ctrl
*) iopb
;
1424 iopb
->intl
= (XD_STATE(iorq
->mode
) == XD_SUB_POLL
)
1427 ctrl
->param_a
= XDPA_TMOD
| XDPA_DACF
;
1428 ctrl
->param_b
= XDPB_ROR
| XDPB_TDT_3_2USEC
;
1429 ctrl
->param_c
= XDPC_OVS
| XDPC_COP
| XDPC_ASR
|
1430 XDPC_RBC
| XDPC_ECC2
;
1431 ctrl
->throttle
= XDC_THROTTLE
;
1432 ctrl
->delay
= XDC_DELAY
;
1436 struct xd_iopb_drive
*drv
=
1437 (struct xd_iopb_drive
*)iopb
;
1438 /* we assume that the disk label has the right
1440 if (XD_STATE(iorq
->mode
) == XD_SUB_POLL
)
1441 drv
->dparam_ipl
= (XDC_DPARAM
<< 3);
1443 drv
->dparam_ipl
= (XDC_DPARAM
<< 3) |
1445 drv
->maxsect
= iorq
->xd
->nsect
- 1;
1446 drv
->maxsector
= drv
->maxsect
;
1447 /* note: maxsector != maxsect only if you are
1448 * doing cyl sparing */
1450 drv
->maxcyl
= iorq
->xd
->pcyl
- 1;
1451 drv
->maxhead
= iorq
->xd
->nhead
- 1;
1455 struct xd_iopb_format
*form
=
1456 (struct xd_iopb_format
*) iopb
;
1457 if (XD_STATE(iorq
->mode
) == XD_SUB_POLL
)
1458 form
->interleave_ipl
= (XDC_INTERLEAVE
<< 3);
1460 form
->interleave_ipl
= (XDC_INTERLEAVE
<< 3) |
1462 form
->field1
= XDFM_FIELD1
;
1463 form
->field2
= XDFM_FIELD2
;
1464 form
->field3
= XDFM_FIELD3
;
1465 form
->field4
= XDFM_FIELD4
;
1466 form
->bytespersec
= XDFM_BPS
;
1467 form
->field6
= XDFM_FIELD6
;
1468 form
->field7
= XDFM_FIELD7
;
1474 /* normal IOPB case (harmless to RDP command) */
1477 iopb
->intl
= (XD_STATE(iorq
->mode
) == XD_SUB_POLL
)
1480 iopb
->sectcnt
= iorq
->sectcnt
;
1481 block
= iorq
->blockno
;
1482 if (iorq
->xd
== NULL
|| block
== 0) {
1483 iopb
->sectno
= iopb
->headno
= iopb
->cylno
= 0;
1485 iopb
->sectno
= block
% iorq
->xd
->nsect
;
1486 block
= block
/ iorq
->xd
->nsect
;
1487 iopb
->headno
= block
% iorq
->xd
->nhead
;
1488 block
= block
/ iorq
->xd
->nhead
;
1489 iopb
->cylno
= block
;
1491 dp
= (u_long
) iorq
->dbuf
;
1492 dp
= iopb
->daddr
= (iorq
->dbuf
== NULL
) ? 0 : dp
;
1493 iopb
->addrmod
= ((dp
+ (XDFM_BPS
* iorq
->sectcnt
)) > 0x1000000)
1500 * xdc_cmd: front end for POLL'd and WAIT'd commands. Returns rqno.
1501 * If you've already got an IORQ, you can call submit directly (currently
1502 * there is no need to do this). NORM requests are handled separately.
1505 xdc_cmd(xdcsc
, cmd
, subfn
, unit
, block
, scnt
, dptr
, fullmode
)
1506 struct xdc_softc
*xdcsc
;
1507 int cmd
, subfn
, unit
, block
, scnt
;
1512 int rqno
, submode
= XD_STATE(fullmode
), retry
;
1513 struct xd_iorq
*iorq
;
1514 struct xd_iopb
*iopb
;
1519 while (xdcsc
->nfree
== 0) {
1520 if (xdc_piodriver(xdcsc
, 0, 1) != XD_ERR_AOK
)
1521 return (XD_ERR_FAIL
);
1527 while (xdcsc
->nfree
== 0) {
1528 if (tsleep(&xdcsc
->nfree
, PRIBIO
, "xdnfree", 0))
1529 return (XD_ERR_FAIL
);
1531 while (xdcsc
->ndone
> XDC_SUBWAITLIM
) {
1532 if (tsleep(&xdcsc
->ndone
, PRIBIO
, "xdsubwait", 0))
1533 return (XD_ERR_FAIL
);
1536 retry
= 0; /* got it */
1540 return (XD_ERR_FAIL
); /* illegal */
1542 if (xdcsc
->nfree
== 0)
1543 panic("xdcmd nfree");
1544 rqno
= XDC_RQALLOC(xdcsc
);
1545 iorq
= &xdcsc
->reqs
[rqno
];
1549 /* init iorq/iopb */
1551 xdc_rqinit(iorq
, xdcsc
,
1552 (unit
== XDC_NOUNIT
) ? NULL
: xdcsc
->sc_drives
[unit
],
1553 fullmode
, block
, scnt
, dptr
, NULL
);
1555 /* load IOPB from iorq */
1557 xdc_rqtopb(iorq
, iopb
, cmd
, subfn
);
1559 /* submit it for processing */
1561 xdc_submit_iorq(xdcsc
, rqno
, fullmode
); /* error code will be in iorq */
1567 * start a buffer running, assumes nfree > 0
1571 xdc_startbuf(xdcsc
, xdsc
, bp
)
1572 struct xdc_softc
*xdcsc
;
1573 struct xd_softc
*xdsc
;
1578 struct xd_iorq
*iorq
;
1579 struct xd_iopb
*iopb
;
1585 panic("xdc_startbuf free");
1586 rqno
= XDC_RQALLOC(xdcsc
);
1587 iorq
= &xdcsc
->reqs
[rqno
];
1593 bp
= bufq_get(xdcsc
->sc_wq
);
1595 panic("xdc_startbuf bp");
1596 xdsc
= xdcsc
->sc_drives
[DISKUNIT(bp
->b_dev
)];
1598 partno
= DISKPART(bp
->b_dev
);
1600 printf("xdc_startbuf: %s%c: %s block %d\n", device_xname(&xdsc
->sc_dev
),
1601 'a' + partno
, (bp
->b_flags
& B_READ
) ? "read" : "write", bp
->b_blkno
);
1602 printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n",
1603 bp
->b_bcount
, bp
->b_data
);
1607 * load request. we have to calculate the correct block number based
1608 * on partition info.
1610 * note that iorq points to the buffer as mapped into DVMA space,
1611 * where as the bp->b_data points to its non-DVMA mapping.
1614 block
= bp
->b_blkno
+ ((partno
== RAW_PART
) ? 0 :
1615 xdsc
->sc_dk
.dk_label
->d_partitions
[partno
].p_offset
);
1617 error
= bus_dmamap_load(xdcsc
->dmatag
, iorq
->dmamap
,
1618 bp
->b_data
, bp
->b_bcount
, 0, BUS_DMA_NOWAIT
);
1620 aprint_error_dev(&xdcsc
->sc_dev
, "warning: cannot load DMA map\n");
1621 XDC_FREE(xdcsc
, rqno
);
1622 bufq_put(xdcsc
->sc_wq
, bp
);
1623 return (XD_ERR_FAIL
); /* XXX: need some sort of
1624 * call-back scheme here? */
1626 bus_dmamap_sync(xdcsc
->dmatag
, iorq
->dmamap
, 0,
1627 iorq
->dmamap
->dm_mapsize
, (bp
->b_flags
& B_READ
)
1628 ? BUS_DMASYNC_PREREAD
1629 : BUS_DMASYNC_PREWRITE
);
1631 /* init iorq and load iopb from it */
1632 xdc_rqinit(iorq
, xdcsc
, xdsc
, XD_SUB_NORM
| XD_MODE_VERBO
, block
,
1633 bp
->b_bcount
/ XDFM_BPS
,
1634 (void *)(u_long
)iorq
->dmamap
->dm_segs
[0].ds_addr
,
1637 xdc_rqtopb(iorq
, iopb
, (bp
->b_flags
& B_READ
) ? XDCMD_RD
: XDCMD_WR
, 0);
1639 /* Instrumentation. */
1640 disk_busy(&xdsc
->sc_dk
);
1642 /* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */
1644 xdc_submit_iorq(xdcsc
, rqno
, XD_SUB_NORM
);
1645 return (XD_ERR_AOK
);
1650 * xdc_submit_iorq: submit an iorq for processing. returns XD_ERR_AOK
1651 * if ok. if it fail returns an error code. type is XD_SUB_*.
1653 * note: caller frees iorq in all cases except NORM
1656 * NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request)
1657 * WAIT: XD_AOK (success), <error-code> (failed)
1658 * POLL: <same as WAIT>
1659 * NOQ : <same as NORM>
1661 * there are three sources for i/o requests:
1662 * [1] xdstrategy: normal block I/O, using "struct buf" system.
1663 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
1664 * [3] open/ioctl: these are I/O requests done in the context of a process,
1665 * and the process should block until they are done.
1667 * software state is stored in the iorq structure. each iorq has an
1668 * iopb structure. the hardware understands the iopb structure.
1669 * every command must go through an iopb. a 7053 can only handle
1670 * XDC_MAXIOPB (31) active iopbs at one time. iopbs are allocated in
1671 * DVMA space at boot up time. what happens if we run out of iopb's?
1672 * for i/o type [1], the buffers are queued at the "buff" layer and
1673 * picked up later by the interrupt routine. for case [2] the
1674 * programmed i/o driver is called with a special flag that says
1675 * return when one iopb is free. for case [3] the process can sleep
1676 * on the iorq free list until some iopbs are available.
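 *
 * as a concrete example of source [3], xdc_ioctlcmd() (below) issues a
 * command from process context roughly like this (abbreviated sketch):
 *
 *	rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block,
 *	    xio->sectcnt, dvmabuf, XD_SUB_WAIT);
 *	if (rqno == XD_ERR_FAIL)
 *		return (EIO);
 *	xio->errnum = xdcsc->reqs[rqno].errnum;
 *	XDC_DONE(xdcsc, rqno, dummy);
 *
 * the XD_SUB_WAIT submit mode makes xdc_submit_iorq() tsleep() on the iorq
 * until the interrupt side marks it done; "dummy" just receives the error
 * code that XDC_DONE() extracts before freeing the iorq.
 */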
1681 xdc_submit_iorq(xdcsc
, iorqno
, type
)
1682 struct xdc_softc
*xdcsc
;
1688 struct xd_iorq
*iorq
= &xdcsc
->reqs
[iorqno
];
1691 printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", device_xname(&xdcsc
->sc_dev
),
1695 /* first check and see if controller is busy */
1696 if (xdcsc
->xdc
->xdc_csr
& XDC_ADDING
) {
1698 printf("xdc_submit_iorq: XDC not ready (ADDING)\n");
1700 if (type
== XD_SUB_NOQ
)
1701 return (XD_ERR_FAIL
); /* failed */
1702 XDC_TWAIT(xdcsc
, iorqno
); /* put at end of waitq */
1705 return XD_ERR_AOK
; /* success */
1707 while (iorq
->iopb
->done
== 0) {
1708 (void) tsleep(iorq
, PRIBIO
, "xdciorq", 0);
1710 return (iorq
->errnum
);
1712 return (xdc_piodriver(xdcsc
, iorqno
, 0));
1714 panic("xdc_submit_iorq adding");
1719 u_char
*rio
= (u_char
*) iorq
->iopb
;
1720 int sz
= sizeof(struct xd_iopb
), lcv
;
1721 printf("%s: aio #%d [",
1722 device_xname(&xdcsc
->sc_dev
), iorq
- xdcsc
->reqs
);
1723 for (lcv
= 0; lcv
< sz
; lcv
++)
1724 printf(" %02x", rio
[lcv
]);
1727 #endif /* XDC_DEBUG */
1729 /* controller not busy, start command */
1730 iopbaddr
= (u_long
) iorq
->dmaiopb
;
1731 XDC_GO(xdcsc
->xdc
, iopbaddr
); /* go! */
1733 /* command now running, wrap it up */
1737 return (XD_ERR_AOK
); /* success */
1739 while (iorq
->iopb
->done
== 0) {
1740 (void) tsleep(iorq
, PRIBIO
, "xdciorq", 0);
1742 return (iorq
->errnum
);
1744 return (xdc_piodriver(xdcsc
, iorqno
, 0));
1746 panic("xdc_submit_iorq wrap up");
1748 panic("xdc_submit_iorq");
1749 return 0; /* not reached */
1756 * programmed i/o driver. this function takes over the computer
1757 * and drains off all i/o requests. it returns the status of the iorq
1758 * the caller is interesting in. if freeone is true, then it returns
1759 * when there is a free iorq.
1762 xdc_piodriver(xdcsc
, iorqno
, freeone
)
1763 struct xdc_softc
*xdcsc
;
1771 struct xdc
*xdc
= xdcsc
->xdc
;
1773 printf("xdc_piodriver(%s, %d, freeone=%d)\n", device_xname(&xdcsc
->sc_dev
),
1777 while (xdcsc
->nwait
|| xdcsc
->nrun
) {
1779 printf("xdc_piodriver: wait=%d, run=%d\n",
1780 xdcsc
->nwait
, xdcsc
->nrun
);
1782 XDC_WAIT(xdc
, count
, XDC_MAXTIME
, (XDC_REMIOPB
| XDC_F_ERROR
));
1784 printf("xdc_piodriver: done wait with count = %d\n", count
);
1786 /* we expect some progress soon */
1787 if (count
== 0 && nreset
>= 2) {
1788 xdc_reset(xdcsc
, 0, XD_RSET_ALL
, XD_ERR_FAIL
, 0);
1790 printf("xdc_piodriver: timeout\n");
1792 return (XD_ERR_FAIL
);
1795 if (xdc_reset(xdcsc
, 0,
1796 (nreset
++ == 0) ? XD_RSET_NONE
: iorqno
,
1799 return (XD_ERR_FAIL
); /* flushes all but POLL
1800 * requests, resets */
1803 xdc_remove_iorq(xdcsc
); /* could resubmit request */
1805 if (xdcsc
->nrun
< XDC_MAXIOPB
) {
1807 printf("xdc_piodriver: done: one free\n");
1809 return (XD_ERR_AOK
);
1811 continue; /* don't xdc_start */
1813 xdc_start(xdcsc
, XDC_MAXIOPB
);
1816 /* get return value */
1818 retval
= xdcsc
->reqs
[iorqno
].errnum
;
1821 printf("xdc_piodriver: done, retval = 0x%x (%s)\n",
1822 xdcsc
->reqs
[iorqno
].errnum
, xdc_e2str(xdcsc
->reqs
[iorqno
].errnum
));
1825 /* now that we've drained everything, start up any bufs that have
1828 while (xdcsc
->nfree
> 0 && bufq_peek(xdcsc
->sc_wq
) != NULL
)
1829 if (xdc_startbuf(xdcsc
, NULL
, NULL
) != XD_ERR_AOK
)
1836 * xdc_reset: reset one drive. NOTE: assumes xdc was just reset.
1837 * we steal iopb[0] for this, but we put it back when we are done.
1840 xdc_xdreset(xdcsc
, xdsc
)
1841 struct xdc_softc
*xdcsc
;
1842 struct xd_softc
*xdsc
;
1845 struct xd_iopb tmpiopb
;
1848 memcpy(&tmpiopb
, xdcsc
->iopbase
, sizeof(tmpiopb
));
1849 memset(xdcsc
->iopbase
, 0, sizeof(tmpiopb
));
1850 xdcsc
->iopbase
->comm
= XDCMD_RST
;
1851 xdcsc
->iopbase
->unit
= xdsc
->xd_drive
;
1852 addr
= (u_long
) xdcsc
->dvmaiopb
;
1853 XDC_GO(xdcsc
->xdc
, addr
); /* go! */
1854 XDC_WAIT(xdcsc
->xdc
, del
, XDC_RESETUSEC
, XDC_REMIOPB
);
1855 if (del
<= 0 || xdcsc
->iopbase
->errs
) {
1856 printf("%s: off-line: %s\n", device_xname(&xdcsc
->sc_dev
),
1857 xdc_e2str(xdcsc
->iopbase
->errnum
));
1858 xdcsc
->xdc
->xdc_csr
= XDC_RESET
;
1859 XDC_WAIT(xdcsc
->xdc
, del
, XDC_RESETUSEC
, XDC_RESET
);
1863 xdcsc
->xdc
->xdc_csr
= XDC_CLRRIO
; /* clear RIO */
1865 memcpy(xdcsc
->iopbase
, &tmpiopb
, sizeof(tmpiopb
));
1870 * xdc_reset: reset everything: requests are marked as errors except
1871 * a polled request (which is resubmitted)
1874 xdc_reset(xdcsc
, quiet
, blastmode
, error
, xdsc
)
1875 struct xdc_softc
*xdcsc
;
1876 int quiet
, blastmode
, error
;
1877 struct xd_softc
*xdsc
;
1880 int del
= 0, lcv
, retval
= XD_ERR_AOK
;
1881 int oldfree
= xdcsc
->nfree
;
1883 /* soft reset hardware */
1886 printf("%s: soft reset\n", device_xname(&xdcsc
->sc_dev
));
1887 xdcsc
->xdc
->xdc_csr
= XDC_RESET
;
1888 XDC_WAIT(xdcsc
->xdc
, del
, XDC_RESETUSEC
, XDC_RESET
);
1890 blastmode
= XD_RSET_ALL
; /* dead, flush all requests */
1891 retval
= XD_ERR_FAIL
;
1894 xdc_xdreset(xdcsc
, xdsc
);
1896 /* fix queues based on "blast-mode" */
1898 for (lcv
= 0; lcv
< XDC_MAXIOPB
; lcv
++) {
1899 register struct xd_iorq
*iorq
= &xdcsc
->reqs
[lcv
];
1901 if (XD_STATE(iorq
->mode
) != XD_SUB_POLL
&&
1902 XD_STATE(iorq
->mode
) != XD_SUB_WAIT
&&
1903 XD_STATE(iorq
->mode
) != XD_SUB_NORM
)
1907 xdcsc
->nrun
--; /* it isn't running any more */
1908 if (blastmode
== XD_RSET_ALL
|| blastmode
!= lcv
) {
1910 iorq
->errnum
= error
;
1911 xdcsc
->iopbase
[lcv
].done
= xdcsc
->iopbase
[lcv
].errs
= 1;
1912 switch (XD_STATE(xdcsc
->reqs
[lcv
].mode
)) {
1914 iorq
->buf
->b_error
= EIO
;
1915 iorq
->buf
->b_resid
=
1916 iorq
->sectcnt
* XDFM_BPS
;
1918 bus_dmamap_sync(xdcsc
->dmatag
, iorq
->dmamap
, 0,
1919 iorq
->dmamap
->dm_mapsize
,
1920 (iorq
->buf
->b_flags
& B_READ
)
1921 ? BUS_DMASYNC_POSTREAD
1922 : BUS_DMASYNC_POSTWRITE
);
1924 bus_dmamap_unload(xdcsc
->dmatag
, iorq
->dmamap
);
1926 disk_unbusy(&xdcsc
->reqs
[lcv
].xd
->sc_dk
,
1927 (xdcsc
->reqs
[lcv
].buf
->b_bcount
-
1928 xdcsc
->reqs
[lcv
].buf
->b_resid
),
1929 (iorq
->buf
->b_flags
& B_READ
));
1931 XDC_FREE(xdcsc
, lcv
); /* add to free list */
1938 XD_NEWSTATE(iorq
->mode
, XD_SUB_DONE
);
1944 /* resubmit, put at front of wait queue */
1945 XDC_HWAIT(xdcsc
, lcv
);
1950 * now, if stuff is waiting, start it.
1951 * since we just reset it should go
1953 xdc_start(xdcsc
, XDC_MAXIOPB
);
1956 if (oldfree
== 0 && xdcsc
->nfree
)
1957 wakeup(&xdcsc
->nfree
);
1960 del
= xdcsc
->nwait
+ xdcsc
->nrun
+ xdcsc
->nfree
+ xdcsc
->ndone
;
1961 if (del
!= XDC_MAXIOPB
)
1962 printf("%s: diag: xdc_reset miscount (%d should be %d)!\n",
1963 device_xname(&xdcsc
->sc_dev
), del
, XDC_MAXIOPB
);
1965 if (xdcsc
->ndone
> XDC_MAXIOPB
- XDC_SUBWAITLIM
)
1966 printf("%s: diag: lots of done jobs (%d)\n",
1967 device_xname(&xdcsc
->sc_dev
), xdcsc
->ndone
);
1969 printf("RESET DONE\n");
1973 * xdc_start: start all waiting buffers
1977 xdc_start(xdcsc
, maxio
)
1978 struct xdc_softc
*xdcsc
;
1983 while (maxio
&& xdcsc
->nwait
&&
1984 (xdcsc
->xdc
->xdc_csr
& XDC_ADDING
) == 0) {
1985 XDC_GET_WAITER(xdcsc
, rqno
); /* note: rqno is an "out"
1987 if (xdc_submit_iorq(xdcsc
, rqno
, XD_SUB_NOQ
) != XD_ERR_AOK
)
1988 panic("xdc_start"); /* should never happen */
1993 * xdc_remove_iorq: remove "done" IOPB's.
1997 xdc_remove_iorq(xdcsc
)
1998 struct xdc_softc
*xdcsc
;
2001 int errnum
, rqno
, comm
, errs
;
2002 struct xdc
*xdc
= xdcsc
->xdc
;
2003 struct xd_iopb
*iopb
;
2004 struct xd_iorq
*iorq
;
2007 if (xdc
->xdc_csr
& XDC_F_ERROR
) {
2009 * FATAL ERROR: should never happen under normal use. This
2010 * error is so bad, you can't even tell which IOPB is bad, so
2013 errnum
= xdc
->xdc_f_err
;
2014 aprint_error_dev(&xdcsc
->sc_dev
, "fatal error 0x%02x: %s\n",
2015 errnum
, xdc_e2str(errnum
));
2016 if (xdc_reset(xdcsc
, 0, XD_RSET_ALL
, errnum
, 0) != XD_ERR_AOK
) {
2017 aprint_error_dev(&xdcsc
->sc_dev
, "soft reset failed!\n");
2018 panic("xdc_remove_iorq: controller DEAD");
2020 return (XD_ERR_AOK
);
2024 * get iopb that is done
2026 * hmm... I used to read the address of the done IOPB off the VME
2027 * registers and calculate the rqno directly from that. that worked
2028 * until I started putting a load on the controller. when loaded, i
2029 * would get interrupts but neither the REMIOPB or F_ERROR bits would
2030 * be set, even after DELAY'ing a while! later on the timeout
2031 * routine would detect IOPBs that were marked "running" but their
2032 * "done" bit was set. rather than dealing directly with this
2033 * problem, it is just easier to look at all running IOPB's for the
2036 if (xdc
->xdc_csr
& XDC_REMIOPB
) {
2037 xdc
->xdc_csr
= XDC_CLRRIO
;
2040 for (rqno
= 0; rqno
< XDC_MAXIOPB
; rqno
++) {
2041 iorq
= &xdcsc
->reqs
[rqno
];
2042 if (iorq
->mode
== 0 || XD_STATE(iorq
->mode
) == XD_SUB_DONE
)
2043 continue; /* free, or done */
2044 iopb
= &xdcsc
->iopbase
[rqno
];
2045 if (iopb
->done
== 0)
2046 continue; /* not done yet */
2050 u_char
*rio
= (u_char
*) iopb
;
2051 int sz
= sizeof(struct xd_iopb
), lcv
;
2052 printf("%s: rio #%d [", device_xname(&xdcsc
->sc_dev
), rqno
);
2053 for (lcv
= 0; lcv
< sz
; lcv
++)
2054 printf(" %02x", rio
[lcv
]);
2057 #endif /* XDC_DEBUG */
2065 iorq
->errnum
= iopb
->errnum
;
2069 /* handle non-fatal errors */
2072 xdc_error(xdcsc
, iorq
, iopb
, rqno
, comm
) == XD_ERR_AOK
)
2073 continue; /* AOK: we resubmitted it */
2076 /* this iorq is now done (hasn't been restarted or anything) */
2078 if ((iorq
->mode
& XD_MODE_VERBO
) && iorq
->lasterror
)
2079 xdc_perror(iorq
, iopb
, 0);
2081 /* now, if read/write check to make sure we got all the data
2082 * we needed. (this may not be the case if we got an error in
2083 * the middle of a multisector request). */
2085 if ((iorq
->mode
& XD_MODE_B144
) != 0 && errs
== 0 &&
2086 (comm
== XDCMD_RD
|| comm
== XDCMD_WR
)) {
2087 /* we just successfully processed a bad144 sector
2088 * note: if we are in bad 144 mode, the pointers have
2089 * been advanced already (see above) and are pointing
2090 * at the bad144 sector. to exit bad144 mode, we
2091 * must advance the pointers 1 sector and issue a new
2092 * request if there are still sectors left to process
2095 XDC_ADVANCE(iorq
, 1); /* advance 1 sector */
2097 /* exit b144 mode */
2098 iorq
->mode
= iorq
->mode
& (~XD_MODE_B144
);
2100 if (iorq
->sectcnt
) { /* more to go! */
2101 iorq
->lasterror
= iorq
->errnum
= iopb
->errnum
= 0;
2102 iopb
->errs
= iopb
->done
= 0;
2104 iopb
->sectcnt
= iorq
->sectcnt
;
2105 iopb
->cylno
= iorq
->blockno
/
2106 iorq
->xd
->sectpercyl
;
2108 (iorq
->blockno
/ iorq
->xd
->nhead
) %
2110 iopb
->sectno
= iorq
->blockno
% XDFM_BPS
;
2111 iopb
->daddr
= (u_long
) iorq
->dbuf
;
2112 XDC_HWAIT(xdcsc
, rqno
);
2113 xdc_start(xdcsc
, 1); /* resubmit */
2117 /* final cleanup, totally done with this request */
2119 switch (XD_STATE(iorq
->mode
)) {
2124 bp
->b_resid
= iorq
->sectcnt
* XDFM_BPS
;
2126 bp
->b_resid
= 0; /* done */
2128 bus_dmamap_sync(xdcsc
->dmatag
, iorq
->dmamap
, 0,
2129 iorq
->dmamap
->dm_mapsize
,
2130 (bp
->b_flags
& B_READ
)
2131 ? BUS_DMASYNC_POSTREAD
2132 : BUS_DMASYNC_POSTWRITE
);
2133 bus_dmamap_unload(xdcsc
->dmatag
, iorq
->dmamap
);
2135 disk_unbusy(&iorq
->xd
->sc_dk
,
2136 (bp
->b_bcount
- bp
->b_resid
),
2137 (bp
->b_flags
& B_READ
));
2138 XDC_FREE(xdcsc
, rqno
);
2142 iorq
->mode
= XD_NEWSTATE(iorq
->mode
, XD_SUB_DONE
);
2147 iorq
->mode
= XD_NEWSTATE(iorq
->mode
, XD_SUB_DONE
);
2153 return (XD_ERR_AOK
);
2157 * xdc_perror: print error.
2158 * - if still_trying is true: we got an error, retried and got a
2159 * different error. in that case lasterror is the old error,
2160 * and errnum is the new one.
2161 * - if still_trying is not true, then if we ever had an error it
2162 * is in lasterror. also, if iorq->errnum == 0, then we recovered
2163 * from that error (otherwise iorq->errnum == iorq->lasterror).
2166 xdc_perror(iorq
, iopb
, still_trying
)
2167 struct xd_iorq
*iorq
;
2168 struct xd_iopb
*iopb
;
2173 int error
= iorq
->lasterror
;
2175 printf("%s", (iorq
->xd
) ? device_xname(&iorq
->xd
->sc_dev
)
2176 : device_xname(&iorq
->xdc
->sc_dev
));
2178 printf("%c: ", 'a' + (char)DISKPART(iorq
->buf
->b_dev
));
2179 if (iopb
->comm
== XDCMD_RD
|| iopb
->comm
== XDCMD_WR
)
2180 printf("%s %d/%d/%d: ",
2181 (iopb
->comm
== XDCMD_RD
) ? "read" : "write",
2182 iopb
->cylno
, iopb
->headno
, iopb
->sectno
);
2183 printf("%s", xdc_e2str(error
));
2186 printf(" [still trying, new error=%s]", xdc_e2str(iorq
->errnum
));
2188 if (iorq
->errnum
== 0)
2189 printf(" [recovered in %d tries]", iorq
->tries
);
2195 * xdc_error: non-fatal error encountered... recover.
2196 * return AOK if resubmitted, return FAIL if this iopb is done
2199 xdc_error(xdcsc
, iorq
, iopb
, rqno
, comm
)
2200 struct xdc_softc
*xdcsc
;
2201 struct xd_iorq
*iorq
;
2202 struct xd_iopb
*iopb
;
2206 int errnum
= iorq
->errnum
;
2207 int erract
= errnum
& XD_ERA_MASK
;
2208 int oldmode
, advance
;
2213 if (erract
== XD_ERA_RSET
) { /* some errors require a reset */
2214 oldmode
= iorq
->mode
;
2215 iorq
->mode
= XD_SUB_DONE
| (~XD_SUB_MASK
& oldmode
);
2217 /* make xdc_start ignore us */
2218 xdc_reset(xdcsc
, 1, XD_RSET_NONE
, errnum
, iorq
->xd
);
2219 iorq
->mode
= oldmode
;
2222 /* check for read/write to a sector in bad144 table if bad: redirect
2223 * request to bad144 area */
2225 if ((comm
== XDCMD_RD
|| comm
== XDCMD_WR
) &&
2226 (iorq
->mode
& XD_MODE_B144
) == 0) {
2227 advance
= iorq
->sectcnt
- iopb
->sectcnt
;
2228 XDC_ADVANCE(iorq
, advance
);
2230 if ((i
= isbad(&iorq
->xd
->dkb
, iorq
->blockno
/ iorq
->xd
->sectpercyl
,
2231 (iorq
->blockno
/ iorq
->xd
->nsect
) % iorq
->xd
->nhead
,
2232 iorq
->blockno
% iorq
->xd
->nsect
)) != -1) {
2233 iorq
->mode
|= XD_MODE_B144
; /* enter bad144 mode &
2235 iopb
->errnum
= iopb
->done
= iopb
->errs
= 0;
2237 iopb
->cylno
= (iorq
->xd
->ncyl
+ iorq
->xd
->acyl
) - 2;
2238 /* second to last acyl */
2239 i
= iorq
->xd
->sectpercyl
- 1 - i
; /* follow bad144
2241 iopb
->headno
= i
/ iorq
->xd
->nhead
;
2242 iopb
->sectno
= i
% iorq
->xd
->nhead
;
2243 XDC_HWAIT(xdcsc
, rqno
);
2244 xdc_start(xdcsc
, 1); /* resubmit */
2245 return (XD_ERR_AOK
); /* recovered! */
2251 * it isn't a bad144 sector, must be real error! see if we can retry
2254 if ((iorq
->mode
& XD_MODE_VERBO
) && iorq
->lasterror
)
2255 xdc_perror(iorq
, iopb
, 1); /* inform of error state
2257 iorq
->lasterror
= errnum
;
2259 if ((erract
== XD_ERA_RSET
|| erract
== XD_ERA_HARD
)
2260 && iorq
->tries
< XDC_MAXTRIES
) { /* retry? */
2262 iorq
->errnum
= iopb
->errnum
= iopb
->done
= iopb
->errs
= 0;
2263 XDC_HWAIT(xdcsc
, rqno
);
2264 xdc_start(xdcsc
, 1); /* restart */
2265 return (XD_ERR_AOK
); /* recovered! */
2268 /* failed to recover from this error */
2269 return (XD_ERR_FAIL
);
2273 * xdc_tick: make sure xd is still alive and ticking (err, kicking).
2280 struct xdc_softc
*xdcsc
= arg
;
2281 int lcv
, s
, reset
= 0;
2283 int nwait
, nrun
, nfree
, ndone
, whd
= 0;
2284 u_char fqc
[XDC_MAXIOPB
], wqc
[XDC_MAXIOPB
], mark
[XDC_MAXIOPB
];
2286 nwait
= xdcsc
->nwait
;
2288 nfree
= xdcsc
->nfree
;
2289 ndone
= xdcsc
->ndone
;
2290 memcpy(wqc
, xdcsc
->waitq
, sizeof(wqc
));
2291 memcpy(fqc
, xdcsc
->freereq
, sizeof(fqc
));
2293 if (nwait
+ nrun
+ nfree
+ ndone
!= XDC_MAXIOPB
) {
2294 printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n",
2295 device_xname(&xdcsc
->sc_dev
), nwait
, nfree
, nrun
, ndone
,
2297 memset(mark
, 0, sizeof(mark
));
2299 for (lcv
= nfree
; lcv
> 0; lcv
--) {
2300 printf("%d ", fqc
[lcv
- 1]);
2301 mark
[fqc
[lcv
- 1]] = 1;
2306 printf("%d ", wqc
[whd
]);
2308 whd
= (whd
+ 1) % XDC_MAXIOPB
;
2312 for (lcv
= 0; lcv
< XDC_MAXIOPB
; lcv
++) {
2314 printf("MARK: running %d: mode %d done %d errs %d errnum 0x%x ttl %d buf %p\n",
2315 lcv
, xdcsc
->reqs
[lcv
].mode
,
2316 xdcsc
->iopbase
[lcv
].done
,
2317 xdcsc
->iopbase
[lcv
].errs
,
2318 xdcsc
->iopbase
[lcv
].errnum
,
2319 xdcsc
->reqs
[lcv
].ttl
, xdcsc
->reqs
[lcv
].buf
);
2322 if (ndone
> XDC_MAXIOPB
- XDC_SUBWAITLIM
)
2323 printf("%s: diag: lots of done jobs (%d)\n",
2324 device_xname(&xdcsc
->sc_dev
), ndone
);
2328 printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n",
2329 device_xname(&xdcsc
->sc_dev
),
2330 xdcsc
->xdc
->xdc_csr
, xdcsc
->nwait
, xdcsc
->nfree
, xdcsc
->nrun
,
2332 for (lcv
= 0; lcv
< XDC_MAXIOPB
; lcv
++) {
2333 if (xdcsc
->reqs
[lcv
].mode
)
2334 printf("running %d: mode %d done %d errs %d errnum 0x%x\n",
2336 xdcsc
->reqs
[lcv
].mode
, xdcsc
->iopbase
[lcv
].done
,
2337 xdcsc
->iopbase
[lcv
].errs
, xdcsc
->iopbase
[lcv
].errnum
);
2341 /* reduce ttl for each request if one goes to zero, reset xdc */
2343 for (lcv
= 0; lcv
< XDC_MAXIOPB
; lcv
++) {
2344 if (xdcsc
->reqs
[lcv
].mode
== 0 ||
2345 XD_STATE(xdcsc
->reqs
[lcv
].mode
) == XD_SUB_DONE
)
2347 xdcsc
->reqs
[lcv
].ttl
--;
2348 if (xdcsc
->reqs
[lcv
].ttl
== 0)
2352 printf("%s: watchdog timeout\n", device_xname(&xdcsc
->sc_dev
));
2353 xdc_reset(xdcsc
, 0, XD_RSET_NONE
, XD_ERR_FAIL
, NULL
);
2357 /* until next time */
2359 callout_reset(&xdcsc
->sc_tick_ch
, XDC_TICKCNT
, xdc_tick
, xdcsc
);
2363 * xdc_ioctlcmd: this function provides a user level interface to the
2364 * controller via ioctl. this allows "format" programs to be written
2365 * in user code, and is also useful for some debugging. we return
2366 * an error code. called at user priority.
2369 xdc_ioctlcmd(xd
, dev
, xio
)
2370 struct xd_softc
*xd
;
2372 struct xd_iocmd
*xio
;
2376 char *dvmabuf
= NULL
, *buf
= NULL
;
2377 struct xdc_softc
*xdcsc
;
2379 bus_dma_segment_t seg
;
2381 /* check sanity of requested command */
2385 case XDCMD_NOP
: /* no op: everything should be zero */
2386 if (xio
->subfn
|| xio
->dptr
|| xio
->dlen
||
2387 xio
->block
|| xio
->sectcnt
)
2391 case XDCMD_RD
: /* read / write sectors (up to XD_IOCMD_MAXS) */
2393 if (xio
->subfn
|| xio
->sectcnt
> XD_IOCMD_MAXS
||
2394 xio
->sectcnt
* XDFM_BPS
!= xio
->dlen
|| xio
->dptr
== NULL
)
2398 case XDCMD_SK
: /* seek: doesn't seem useful to export this */
2401 case XDCMD_WRP
: /* write parameters */
2402 return (EINVAL
);/* not useful, except maybe drive
2403 * parameters... but drive parameters should
2404 * go via disklabel changes */
2406 case XDCMD_RDP
: /* read parameters */
2407 if (xio
->subfn
!= XDFUN_DRV
||
2408 xio
->dlen
|| xio
->block
|| xio
->dptr
)
2409 return (EINVAL
); /* allow read drive params to
2411 xio
->sectcnt
= xd
->hw_spt
; /* we already know the answer */
2415 case XDCMD_XRD
: /* extended read/write */
2418 switch (xio
->subfn
) {
2420 case XDFUN_THD
:/* track headers */
2421 if (xio
->sectcnt
!= xd
->hw_spt
||
2422 (xio
->block
% xd
->nsect
) != 0 ||
2423 xio
->dlen
!= XD_IOCMD_HSZ
* xd
->hw_spt
||
2429 case XDFUN_FMT
:/* NOTE: also XDFUN_VFY */
2430 if (xio
->cmd
== XDCMD_XRD
)
2431 return (EINVAL
); /* no XDFUN_VFY */
2432 if (xio
->sectcnt
|| xio
->dlen
||
2433 (xio
->block
% xd
->nsect
) != 0 || xio
->dptr
)
2437 case XDFUN_HDR
:/* header, header verify, data, data ECC */
2438 return (EINVAL
); /* not yet */
2440 case XDFUN_DM
: /* defect map */
2441 case XDFUN_DMX
:/* defect map (alternate location) */
2442 if (xio
->sectcnt
|| xio
->dlen
!= XD_IOCMD_DMSZ
||
2443 (xio
->block
% xd
->nsect
) != 0 || xio
->dptr
== NULL
)
2452 case XDCMD_TST
: /* diagnostics */
2456 return (EINVAL
);/* ??? */
2461 /* create DVMA buffer for request if needed */
2465 if ((error
= xd_dmamem_alloc(xdcsc
->dmatag
, xdcsc
->auxmap
,
2467 xio
->dlen
, (void **)&buf
,
2471 dvmabuf
= (void *)(u_long
)BUS_ADDR_PADDR(busbuf
);
2473 if (xio
->cmd
== XDCMD_WR
|| xio
->cmd
== XDCMD_XWR
) {
2474 if ((error
= copyin(xio
->dptr
, buf
, xio
->dlen
)) != 0) {
2475 bus_dmamem_unmap(xdcsc
->dmatag
, buf
, xio
->dlen
);
2476 bus_dmamem_free(xdcsc
->dmatag
, &seg
, rseg
);
2486 rqno
= xdc_cmd(xdcsc
, xio
->cmd
, xio
->subfn
, xd
->xd_drive
, xio
->block
,
2487 xio
->sectcnt
, dvmabuf
, XD_SUB_WAIT
);
2488 if (rqno
== XD_ERR_FAIL
) {
2492 xio
->errnum
= xdcsc
->reqs
[rqno
].errnum
;
2493 xio
->tries
= xdcsc
->reqs
[rqno
].tries
;
2494 XDC_DONE(xdcsc
, rqno
, dummy
);
2496 if (xio
->cmd
== XDCMD_RD
|| xio
->cmd
== XDCMD_XRD
)
2497 error
= copyout(buf
, xio
->dptr
, xio
->dlen
);
2502 xd_dmamem_free(xdcsc
->dmatag
, xdcsc
->auxmap
, &seg
, rseg
,
2509 * xdc_e2str: convert error code number into an error string
2516 return ("Software fatal error");
2518 return ("Successful completion");
2520 return ("Illegal cylinder address");
2522 return ("Illegal head address");
2524 return ("Illegal sector address");
2526 return ("Count zero");
2528 return ("Unimplemented command");
2530 return ("Illegal field length 1");
2532 return ("Illegal field length 2");
2534 return ("Illegal field length 3");
2536 return ("Illegal field length 4");
2538 return ("Illegal field length 5");
2540 return ("Illegal field length 6");
2542 return ("Illegal field length 7");
2544 return ("Illegal scatter/gather length");
2546 return ("Not enough sectors per track");
2548 return ("Next IOPB address alignment error");
2550 return ("Scatter/gather address alignment error");
2552 return ("Scatter/gather with auto-ECC");
2554 return ("Soft ECC corrected");
2556 return ("ECC ignored");
2558 return ("Auto-seek retry recovered");
2560 return ("Soft retry recovered");
2562 return ("Hard data ECC");
2564 return ("Header not found");
2566 return ("Drive not ready");
2568 return ("Operation timeout");
2570 return ("VMEDMA timeout");
2572 return ("Disk sequencer error");
2574 return ("Header ECC error");
2576 return ("Read verify");
2578 return ("Fatal VMEDMA error");
2580 return ("VMEbus error");
2582 return ("Drive faulted");
2584 return ("Header error/cylinder");
2586 return ("Header error/head");
2588 return ("Drive not on-cylinder");
2590 return ("Seek error");
2592 return ("Illegal sector size");
2594 return ("Soft ECC");
2596 return ("Write-protect error");
2598 return ("IRAM self test failure");
2600 return ("Maintenance test 3 failure (DSKCEL RAM)");
2602 return ("Maintenance test 4 failure (header shift reg)");
2604 return ("Maintenance test 5 failure (VMEDMA regs)");
2606 return ("Maintenance test 6 failure (REGCEL chip)");
2608 return ("Maintenance test 7 failure (buffer parity)");
2610 return ("Maintenance test 8 failure (disk FIFO)");
2612 return ("IOPB checksum miscompare");
2614 return ("IOPB DMA fatal");
2616 return ("IOPB address alignment error");
2618 return ("Firmware error");
2620 return ("Illegal maintenance mode test number");
2622 return ("ACFAIL asserted");
2624 return ("Unknown error");