/*	$NetBSD: sw.c,v 1.21 2008/04/28 20:23:35 martin Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file contains only the machine-dependent parts of the
 * Sun4 SCSI driver.  (Autoconfig stuff and DMA functions.)
 * The machine-independent parts are in ncr5380sbc.c
 *
 * Supported hardware includes:
 * Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
 * Sun SCSI-3 on VME (si: Sun 4/200-series, others)
 *
 * The VME variant has a bit to enable or disable the DMA engine,
 * but that bit also gates the interrupt line from the NCR5380!
 * Therefore, in order to get any interrupt from the 5380 (i.e.
 * for reselect), one must clear the DMA engine transfer count and
 * then enable DMA.  This has the further complication that you
 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
 * we have to turn DMA back off before we even look at the 5380.
 *
 * What wonderfully whacky hardware this is!
 *
 * David Jones wrote the initial version of this module for NetBSD/sun3,
 * which included support for the VME adapter only (no reselection).
 *
 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
 * both the VME and OBIO code to support disconnect/reselect.
 * (Required figuring out the hardware "features" noted above.)
 *
 * The autoconfiguration boilerplate came from Adam Glass.
 *
 * Jason R. Thorpe ported the autoconfiguration and VME portions to
 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
 * a wacky OBIO variant of the VME SCSI-3.  Many thanks to Chuck Cranor
 * for lots of helpful tips and suggestions.  Thanks also to Paul Kranenburg
 * and Chris Torek for bits of insight needed along the way.  Thanks to
 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
 * for the sake of testing.  Andrew Gillham helped work out the bugs in
 * the 4/100 DMA code.
 */

/*
 * NOTE: support for the 4/100 "SCSI Weird" is not complete!  DMA
 * works, but interrupts (and, thus, reselection) don't.  I don't know
 * why, and I don't have a machine to test this on further.
 *
 * DMA, DMA completion interrupts, and reselection work fine on my
 * 4/260 with modern SCSI-II disks attached.  I've had reports of
 * reselection failing on Sun Shoebox-type configurations where
 * there are multiple non-SCSI devices behind Emulex or Adaptec
 * bridges.  These devices pre-date the SCSI-I spec, and might not
 * behave the way the 5380 code expects.  For this reason, only
 * DMA is enabled by default in this driver.
 *
 *	Jason R. Thorpe <thorpej@NetBSD.org>
 *	December 8, 1995
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sw.c,v 1.21 2008/04/28 20:23:35 martin Exp $");

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/buf.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#ifndef DDB
#define Debugger()
#endif

#ifndef DEBUG
#define DEBUG XXX
#endif

#define COUNT_SW_LEFTOVERS	XXX	/* See sw DMA completion code */

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include <sparc/dev/swreg.h>

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption they're not worth the DMA overhead).
 */
#define MIN_DMA_LEN 128

/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems.  (paranoid?)
 */
#define MAX_DMA_LEN 0xE000
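
/*
 * For reference, 0xE000 is 57344 bytes: 7 x 8 KB pages (or 14 x 4 KB
 * pages), and safely under the 16-bit counter limit of 65535.
 */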

#ifdef DEBUG
int sw_debug = 0;
#endif

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct sw_dma_handle {
        int             dh_flags;
#define SIDH_BUSY       0x01            /* This DH is in use */
#define SIDH_OUT        0x02            /* DMA does data out (write) */
        u_char          *dh_addr;       /* KVA of start of buffer */
        int             dh_maplen;      /* Original data length */
        long            dh_startingpa;  /* PA of buffer; for "sw" */
        bus_dmamap_t    dh_dmamap;
#define dh_dvma dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct sw_softc {
        struct ncr5380_softc    ncr_sc;
        bus_space_tag_t         sc_bustag;      /* bus tags */
        bus_dma_tag_t           sc_dmatag;

        struct sw_dma_handle    *sc_dma;
        int     sc_xlen;        /* length of current DMA segment. */
        int     sc_options;     /* options for this instance. */
};

/*
 * Options.  By default, DMA is enabled and DMA completion interrupts
 * and reselect are disabled.  You may enable additional features
 * with the `flags' directive in your kernel's configuration file.
 *
 * Alternatively, you can patch your kernel with DDB or some other
 * mechanism.  The sc_options member of the softc is OR'd with
 * the value in sw_options.
 *
 * On the "sw", interrupts (and thus reselection) don't work, so they're
 * disabled by default.  DMA is still a little dangerous, too.
 *
 * Note, there's a separate sw_options to make life easier.
 */
#define SW_ENABLE_DMA   0x01    /* Use DMA (maybe polled) */
#define SW_DMA_INTR     0x02    /* DMA completion interrupts */
#define SW_DO_RESELECT  0x04    /* Allow disconnect/reselect */
#define SW_OPTIONS_MASK (SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
#define SW_OPTIONS_BITS "\10\3RESELECT\2DMA_INTR\1DMA"
int sw_options = SW_ENABLE_DMA;
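
/*
 * As a sketch of how these options are normally set (the locator values
 * below are illustrative, not taken from any particular config file),
 * a kernel configuration line such as
 *
 *	sw0 at obio0 ... flags 0x07
 *
 * would pass cf_flags = 0x07 to sw_attach(), enabling DMA, DMA
 * completion interrupts, and disconnect/reselect for that instance.
 */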

static int  sw_match(device_t, cfdata_t, void *);
static void sw_attach(device_t, device_t, void *);
static int  sw_intr(void *);
static void sw_reset_adapter(struct ncr5380_softc *);
static void sw_minphys(struct buf *);

void sw_dma_alloc(struct ncr5380_softc *);
void sw_dma_free(struct ncr5380_softc *);
void sw_dma_poll(struct ncr5380_softc *);

void sw_dma_setup(struct ncr5380_softc *);
void sw_dma_start(struct ncr5380_softc *);
void sw_dma_eop(struct ncr5380_softc *);
void sw_dma_stop(struct ncr5380_softc *);

void sw_intr_on(struct ncr5380_softc *);
void sw_intr_off(struct ncr5380_softc *);

/* Shorthand bus space access */
#define SWREG_READ(sc, index) \
        bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
#define SWREG_WRITE(sc, index, v) \
        bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)


/* The Sun "SCSI Weird" 4/100 obio controller. */
CFATTACH_DECL_NEW(sw, sizeof(struct sw_softc),
    sw_match, sw_attach, NULL, NULL);

static int
sw_match(device_t parent, cfdata_t cf, void *aux)
{
        union obio_attach_args *uoba = aux;
        struct obio4_attach_args *oba;

        /* Nothing but a Sun 4/100 is going to have these devices. */
        if (cpuinfo.cpu_type != CPUTYP_4_100)
                return (0);

        if (uoba->uoba_isobio4 == 0)
                return (0);

        /* Make sure there is something there... */
        oba = &uoba->uoba_oba4;
        return (bus_space_probe(oba->oba_bustag, oba->oba_paddr,
            1,          /* probe size */
            1,          /* offset */
            0,          /* flags */
            NULL, NULL));
}

static void
sw_attach(device_t parent, device_t self, void *aux)
{
        struct sw_softc *sc = device_private(self);
        struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
        union obio_attach_args *uoba = aux;
        struct obio4_attach_args *oba = &uoba->uoba_oba4;
        bus_space_handle_t bh;
        char bits[64];
        int i;

        ncr_sc->sc_dev = self;
        sc->sc_dmatag = oba->oba_dmatag;

        /* Map the controller registers. */
        if (bus_space_map(oba->oba_bustag, oba->oba_paddr,
            SWREG_BANK_SZ,
            BUS_SPACE_MAP_LINEAR,
            &bh) != 0) {
                aprint_error(": cannot map registers\n");
                return;
        }

        ncr_sc->sc_regt = oba->oba_bustag;
        ncr_sc->sc_regh = bh;

        sc->sc_options = sw_options;

        ncr_sc->sc_dma_setup = sw_dma_setup;
        ncr_sc->sc_dma_start = sw_dma_start;
        ncr_sc->sc_dma_eop   = sw_dma_stop;
        ncr_sc->sc_dma_stop  = sw_dma_stop;
        ncr_sc->sc_intr_on   = sw_intr_on;
        ncr_sc->sc_intr_off  = sw_intr_off;

        /*
         * Establish interrupt channel.
         * Default interrupt priority always is 3.  At least, that's
         * what my board seems to be at.  --thorpej
         */
        if (oba->oba_pri == -1)
                oba->oba_pri = 3;

        (void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO,
            sw_intr, sc);

        aprint_normal(" pri %d\n", oba->oba_pri);

        /*
         * Pull in the options flags.  Allow the user to completely
         * override the default values.
         */
        if ((device_cfdata(self)->cf_flags & SW_OPTIONS_MASK) != 0)
                sc->sc_options =
                    device_cfdata(self)->cf_flags & SW_OPTIONS_MASK;

        /*
         * Initialize fields used by the MI code
         */

        /* NCR5380 register bank offsets */
        ncr_sc->sci_r0 = 0;
        ncr_sc->sci_r1 = 1;
        ncr_sc->sci_r2 = 2;
        ncr_sc->sci_r3 = 3;
        ncr_sc->sci_r4 = 4;
        ncr_sc->sci_r5 = 5;
        ncr_sc->sci_r6 = 6;
        ncr_sc->sci_r7 = 7;

        ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

        /*
         * MD function pointers used by the MI code.
         */
        ncr_sc->sc_pio_out = ncr5380_pio_out;
        ncr_sc->sc_pio_in  = ncr5380_pio_in;
        ncr_sc->sc_dma_alloc = sw_dma_alloc;
        ncr_sc->sc_dma_free  = sw_dma_free;
        ncr_sc->sc_dma_poll  = sw_dma_poll;

        ncr_sc->sc_flags = 0;
        if ((sc->sc_options & SW_DO_RESELECT) == 0)
                ncr_sc->sc_no_disconnect = 0xFF;
        if ((sc->sc_options & SW_DMA_INTR) == 0)
                ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
        ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

        /*
         * Allocate DMA handles.
         */
        i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
        sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
        if (sc->sc_dma == NULL)
                panic("sw: DMA handle malloc failed");

        for (i = 0; i < SCI_OPENINGS; i++) {
                sc->sc_dma[i].dh_flags = 0;

                /* Allocate a DMA handle */
                if (bus_dmamap_create(
                    sc->sc_dmatag,      /* tag */
                    MAXPHYS,            /* size */
                    1,                  /* nsegments */
                    MAXPHYS,            /* maxsegsz */
                    0,                  /* boundary */
                    BUS_DMA_NOWAIT,
                    &sc->sc_dma[i].dh_dmamap) != 0) {
                        aprint_error_dev(self,
                            "DMA buffer map create error\n");
                        return;
                }
        }

        if (sc->sc_options) {
                snprintb(bits, sizeof(bits),
                    SW_OPTIONS_BITS, sc->sc_options);
                aprint_normal_dev(self, "options=%s\n", bits);
        }

        ncr_sc->sc_channel.chan_id = 7;
        ncr_sc->sc_adapter.adapt_minphys = sw_minphys;

        /* Initialize sw board */
        sw_reset_adapter(ncr_sc);

        /* Attach the ncr5380 chip driver */
        ncr5380_attach(ncr_sc);
}

static void
sw_minphys(struct buf *bp)
{

        if (bp->b_bcount > MAX_DMA_LEN) {
#ifdef DEBUG
                if (sw_debug) {
                        printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
                        Debugger();
                }
#endif
                bp->b_bcount = MAX_DMA_LEN;
        }
        minphys(bp);
}

#define CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
        SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR )

static int
sw_intr(void *arg)
{
        struct sw_softc *sc = arg;
        struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
        int dma_error, claimed;
        u_short csr;

        claimed = 0;
        dma_error = 0;

        /* SBC interrupt? DMA interrupt? */
        csr = SWREG_READ(ncr_sc, SWREG_CSR);

        NCR_TRACE("sw_intr: csr=0x%x\n", csr);

        if (csr & SW_CSR_DMA_CONFLICT) {
                dma_error |= SW_CSR_DMA_CONFLICT;
                printf("%s: DMA conflict\n", __func__);
        }
        if (csr & SW_CSR_DMA_BUS_ERR) {
                dma_error |= SW_CSR_DMA_BUS_ERR;
                printf("%s: DMA bus error\n", __func__);
        }
        if (dma_error) {
                if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
                        sc->ncr_sc.sc_state |= NCR_ABORTING;
                /* Make sure we will call the main isr. */
                csr |= SW_CSR_DMA_IP;
        }

        if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
                claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef DEBUG
                if (!claimed) {
                        printf("%s: spurious from SBC\n", __func__);
                        if (sw_debug & 4) {
                                Debugger();     /* XXX */
                        }
                }
#endif
        }

        return claimed;
}

static void
sw_reset_adapter(struct ncr5380_softc *ncr_sc)
{

#ifdef DEBUG
        if (sw_debug) {
                printf("%s\n", __func__);
        }
#endif

        /*
         * The reset bits in the CSR are active low.
         */
        SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
        delay(10);
        SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);

        SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
        SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
        delay(10);
        SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);

        SCI_CLR_INTR(ncr_sc);
}

/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun4, this means mapping the buffer
 * into DVMA space.
 */
void
sw_dma_alloc(struct ncr5380_softc *ncr_sc)
{
        struct sw_softc *sc = (struct sw_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct scsipi_xfer *xs = sr->sr_xs;
        struct sw_dma_handle *dh;
        int i, xlen;
        u_long addr;

#ifdef DIAGNOSTIC
        if (sr->sr_dma_hand != NULL)
                panic("%s: already have DMA handle", __func__);
#endif

#if 1   /* XXX - Temporary */
        /* XXX - In case we think DMA is completely broken... */
        if ((sc->sc_options & SW_ENABLE_DMA) == 0)
                return;
#endif

        addr = (u_long)ncr_sc->sc_dataptr;
        xlen = ncr_sc->sc_datalen;

        /* If the DMA start addr is misaligned then do PIO */
        if ((addr & 1) || (xlen & 1)) {
                printf("%s: misaligned.\n", __func__);
                return;
        }

        /* Make sure our caller checked sc_min_dma_len. */
        if (xlen < MIN_DMA_LEN)
                panic("%s: xlen=0x%x", __func__, xlen);

        /* Find free DMA handle.  Guaranteed to find one since we have
           as many DMA handles as the driver has processes. */
        for (i = 0; i < SCI_OPENINGS; i++) {
                if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
                        goto found;
        }
        panic("sw: no free DMA handles.");

found:
        dh = &sc->sc_dma[i];
        dh->dh_flags = SIDH_BUSY;
        dh->dh_addr = (u_char *)addr;
        dh->dh_maplen = xlen;

        /* Copy the "write" flag for convenience. */
        if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
                dh->dh_flags |= SIDH_OUT;

        /*
         * Double-map the buffer into DVMA space.  If we can't re-map
         * the buffer, we print a warning and fall back to PIO mode.
         *
         * NOTE: it is not safe to sleep here!
         */
        if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
            (void *)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
                /* Can't remap segment */
                printf("%s: can't remap 0x%lx/0x%x, doing PIO\n",
                    __func__, addr, dh->dh_maplen);
                dh->dh_flags = 0;
                return;
        }
        bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
            (dh->dh_flags & SIDH_OUT)
                ? BUS_DMASYNC_PREWRITE
                : BUS_DMASYNC_PREREAD);

        /* success */
        sr->sr_dma_hand = dh;
}

void
sw_dma_free(struct ncr5380_softc *ncr_sc)
{
        struct sw_softc *sc = (struct sw_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct sw_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
        if (dh == NULL)
                panic("%s: no DMA handle", __func__);
#endif

        if (ncr_sc->sc_state & NCR_DOINGDMA)
                panic("%s: free while in progress", __func__);

        if (dh->dh_flags & SIDH_BUSY) {
                /* Give back the DVMA space. */
                bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
                    dh->dh_dvma, dh->dh_maplen,
                    (dh->dh_flags & SIDH_OUT)
                        ? BUS_DMASYNC_POSTWRITE
                        : BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
                dh->dh_flags = 0;
        }
        sr->sr_dma_hand = NULL;
}

/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
sw_dma_poll(struct ncr5380_softc *ncr_sc)
{
        struct sci_req *sr = ncr_sc->sc_current;
        int tmo, csr_mask, csr;

        /* Make sure DMA started successfully. */
        if (ncr_sc->sc_state & NCR_ABORTING)
                return;

        csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
            SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;

        tmo = 50000;    /* X100 = 5 sec. */
        for (;;) {
                csr = SWREG_READ(ncr_sc, SWREG_CSR);
                if (csr & csr_mask)
                        break;
                if (--tmo <= 0) {
                        printf("%s: DMA timeout (while polling)\n",
                            device_xname(ncr_sc->sc_dev));
                        /* Indicate timeout as MI code would. */
                        sr->sr_flags |= SR_OVERDUE;
                        break;
                }
                delay(100);
        }

#ifdef DEBUG
        if (sw_debug) {
                printf("%s: done, csr=0x%x\n", __func__, csr);
        }
#endif
}

/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 *
 * XXX THIS MIGHT NOT WORK RIGHT!
 */
void
sw_intr_on(struct ncr5380_softc *ncr_sc)
{
        uint32_t csr;

        sw_dma_setup(ncr_sc);
        csr = SWREG_READ(ncr_sc, SWREG_CSR);
        csr |= SW_CSR_DMA_EN;   /* XXX - this bit is for vme only?! */
        SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 *
 * XXX THIS MIGHT NOT WORK RIGHT!
 */
void
sw_intr_off(struct ncr5380_softc *ncr_sc)
{
        uint32_t csr;

        csr = SWREG_READ(ncr_sc, SWREG_CSR);
        csr &= ~SW_CSR_DMA_EN;
        SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}

/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to set up the DMA engine before the bus enters a DATA phase.
 *
 * On the OBIO version we just clear the DMA count and address
 * here (to make sure it stays idle) and do the real setup
 * later, in dma_start.
 */
void
sw_dma_setup(struct ncr5380_softc *ncr_sc)
{
        uint32_t csr;

        /* No FIFO to reset on "sw". */

        /* Set direction (assume recv here) */
        csr = SWREG_READ(ncr_sc, SWREG_CSR);
        csr &= ~SW_CSR_SEND;
        SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

        SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
        SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
}

void
sw_dma_start(struct ncr5380_softc *ncr_sc)
{
        struct sw_softc *sc = (struct sw_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct sw_dma_handle *dh = sr->sr_dma_hand;
        u_long dva;
        int xlen, adj, adjlen;
        u_int mode;
        uint32_t csr;

        /*
         * Get the DVMA mapping for this segment.
         */
        dva = (u_long)(dh->dh_dvma);
        if (dva & 1)
                panic("%s: bad dva=0x%lx", __func__, dva);

        xlen = ncr_sc->sc_datalen;
        xlen &= ~1;
        sc->sc_xlen = xlen;     /* XXX: or less... */

#ifdef DEBUG
        if (sw_debug & 2) {
                printf("%s: dh=%p, dva=0x%lx, xlen=%d\n",
                    __func__, dh, dva, xlen);
        }
#endif

        /*
         * Set up the DMA controller.
         * Note that (dh->dh_len < sc_datalen)
         */

        /* Set direction (send/recv) */
        csr = SWREG_READ(ncr_sc, SWREG_CSR);
        if (dh->dh_flags & SIDH_OUT) {
                csr |= SW_CSR_SEND;
        } else {
                csr &= ~SW_CSR_SEND;
        }
        SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

        /*
         * The "sw" needs longword aligned transfers.  We
         * detect a shortword aligned transfer here, and adjust the
         * DMA transfer by 2 bytes.  These two bytes are read/written
         * in PIO mode just before the DMA is started.
         */
        adj = 0;
        if (dva & 2) {
                adj = 2;
#ifdef DEBUG
                if (sw_debug & 2)
                        printf("%s: adjusted up %d bytes\n", __func__, adj);
#endif
        }

        /* We have to frob the address on the "sw". */
        dh->dh_startingpa = (dva | 0xF00000);
        SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
        SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);
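
        /*
         * Worked example (illustrative numbers only): if the DVMA address
         * ends in ...2 and xlen is 0x2000, adj becomes 2, the engine is
         * programmed to start at dh_startingpa + 2 for 0x1FFE bytes, and
         * the first two bytes are moved by the PIO transfer just below.
         */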

        /*
         * Acknowledge the phase change.  (After DMA setup!)
         * Put the SBIC into DMA mode, and start the transfer.
         */
        if (dh->dh_flags & SIDH_OUT) {
                NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
                if (adj) {
                        adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
                            adj, dh->dh_addr);
                        if (adjlen != adj)
                                printf("%s: bad outgoing adj, %d != %d\n",
                                    device_xname(ncr_sc->sc_dev), adjlen, adj);
                }
                SCI_CLR_INTR(ncr_sc);
                NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
                mode = NCR5380_READ(ncr_sc, sci_mode);
                mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
                NCR5380_WRITE(ncr_sc, sci_mode, mode);
                NCR5380_WRITE(ncr_sc, sci_dma_send, 0);         /* start it */
        } else {
                NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
                if (adj) {
                        adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
                            adj, dh->dh_addr);
                        if (adjlen != adj)
                                printf("%s: bad incoming adj, %d != %d\n",
                                    device_xname(ncr_sc->sc_dev), adjlen, adj);
                }
                SCI_CLR_INTR(ncr_sc);
                NCR5380_WRITE(ncr_sc, sci_icmd, 0);
                mode = NCR5380_READ(ncr_sc, sci_mode);
                mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
                NCR5380_WRITE(ncr_sc, sci_mode, mode);
                NCR5380_WRITE(ncr_sc, sci_irecv, 0);            /* start it */
        }

        /* Let'er rip! */
        csr |= SW_CSR_DMA_EN;
        SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

        ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef DEBUG
        if (sw_debug & 2) {
                printf("%s: started, flags=0x%x\n",
                    __func__, ncr_sc->sc_state);
        }
#endif
}

void
sw_dma_eop(struct ncr5380_softc *ncr_sc)
{

        /* Not needed - DMA was stopped prior to examining sci_csr */
}


#if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
#define COUNT_SW_LEFTOVERS
#endif
#ifdef COUNT_SW_LEFTOVERS
/*
 * Let's find out how often these occur.  Read these with DDB from time
 * to time.
 */
int sw_3_leftover = 0;
int sw_2_leftover = 0;
int sw_1_leftover = 0;
int sw_0_leftover = 0;
#endif

void
sw_dma_stop(struct ncr5380_softc *ncr_sc)
{
        struct sci_req *sr = ncr_sc->sc_current;
        struct sw_dma_handle *dh = sr->sr_dma_hand;
        int ntrans = 0, dva;
        u_int mode;
        uint32_t csr;

        if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
                printf("%s: DMA not running\n", __func__);
#endif
                return;
        }
        ncr_sc->sc_state &= ~NCR_DOINGDMA;

        /* First, halt the DMA engine. */
        csr = SWREG_READ(ncr_sc, SWREG_CSR);
        csr &= ~SW_CSR_DMA_EN;
        SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

        /*
         * XXX HARDWARE BUG!
         * Apparently, some early 4/100 SCSI controllers had a hardware
         * bug that caused the controller to do illegal memory access.
         * We see this as SW_CSR_DMA_BUS_ERR (makes sense).  To work around
         * this, we simply need to clean up after ourselves ... there will
         * be as many as 3 bytes left over.  Since we clean up "left-over"
         * bytes on every read anyway, we just continue to chug along
         * if SW_CSR_DMA_BUS_ERR is asserted.  (This was probably worked
         * around in hardware later with the "left-over byte" indicator
         * in the VME controller.)
         */
#if 0
        if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR))
#else
        if (csr & (SW_CSR_DMA_CONFLICT))
#endif
        {
                printf("sw: DMA error, csr=0x%x, reset\n", csr);
                sr->sr_xs->error = XS_DRIVER_STUFFUP;
                ncr_sc->sc_state |= NCR_ABORTING;
                sw_reset_adapter(ncr_sc);
        }

        /* Note that timeout may have set the error flag. */
        if (ncr_sc->sc_state & NCR_ABORTING)
                goto out;

        /*
         * Now try to figure out how much actually transferred.
         *
         * The "sw" doesn't have a FIFO or a bcr, so we've stored
         * the starting PA of the transfer in the DMA handle,
         * and subtract it from the ending PA left in the dma_addr
         * register.
         */
        dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
        ntrans = (dva - dh->dh_startingpa);
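
        /*
         * For example (illustrative numbers only): a transfer started at
         * dh_startingpa = 0xF02000 whose DMA address register now reads
         * 0xF03FFC has moved ntrans = 0x1FFC bytes; the data pointer and
         * length below are advanced by that amount.
         */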

#ifdef DEBUG
        if (sw_debug & 2) {
                printf("%s: ntrans=0x%x\n", __func__, ntrans);
        }
#endif

        if (ntrans > ncr_sc->sc_datalen)
                panic("%s: excess transfer", __func__);

        /* Adjust data pointer */
        ncr_sc->sc_dataptr += ntrans;
        ncr_sc->sc_datalen -= ntrans;

        /*
         * After a read, we may need to clean up
         * "left-over bytes" (yuck!).  The "sw" doesn't
         * have a "left-over" indicator, so we have to do
         * this no matter what.  Ick.
         */
        if ((dh->dh_flags & SIDH_OUT) == 0) {
                char *cp = ncr_sc->sc_dataptr;
                uint32_t bpr;

                bpr = SWREG_READ(ncr_sc, SWREG_BPR);
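
                /*
                 * The switch below assumes the byte-pack register holds
                 * the partially assembled longword with the earliest
                 * left-over byte in the most significant bits.
                 */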
                switch (dva & 3) {
                case 3:
                        cp[0] = (bpr & 0xff000000) >> 24;
                        cp[1] = (bpr & 0x00ff0000) >> 16;
                        cp[2] = (bpr & 0x0000ff00) >> 8;
#ifdef COUNT_SW_LEFTOVERS
                        ++sw_3_leftover;
#endif
                        break;

                case 2:
                        cp[0] = (bpr & 0xff000000) >> 24;
                        cp[1] = (bpr & 0x00ff0000) >> 16;
#ifdef COUNT_SW_LEFTOVERS
                        ++sw_2_leftover;
#endif
                        break;

                case 1:
                        cp[0] = (bpr & 0xff000000) >> 24;
#ifdef COUNT_SW_LEFTOVERS
                        ++sw_1_leftover;
#endif
                        break;

#ifdef COUNT_SW_LEFTOVERS
                default:
                        ++sw_0_leftover;
                        break;
#endif
                }
        }

out:
        SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
        SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);

        /* Put SBIC back in PIO mode. */
        mode = NCR5380_READ(ncr_sc, sci_mode);
        mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
        NCR5380_WRITE(ncr_sc, sci_mode, mode);
        NCR5380_WRITE(ncr_sc, sci_icmd, 0);

#ifdef DEBUG
        if (sw_debug & 2) {
                printf("%s: ntrans=0x%x\n", __func__, ntrans);
        }
#endif
}