sys/arch/newsmips/dev/sc_wrap.c
/*	$NetBSD: sc_wrap.c,v 1.30 2007/03/04 06:00:26 christos Exp $	*/

/*
 * This driver is slow!  Need to rewrite.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sc_wrap.c,v 1.30 2007/03/04 06:00:26 christos Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <newsmips/dev/hbvar.h>
#include <newsmips/dev/scsireg.h>
#include <newsmips/dev/dmac_0448.h>
#include <newsmips/dev/screg_1185.h>

#include <machine/adrsmap.h>
#include <machine/autoconf.h>
#include <machine/machConst.h>

#include <mips/cache.h>

static int cxd1185_match(device_t, cfdata_t, void *);
static void cxd1185_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(sc, sizeof(struct sc_softc),
    cxd1185_match, cxd1185_attach, NULL, NULL);

void cxd1185_init(struct sc_softc *);
static void free_scb(struct sc_softc *, struct sc_scb *);
static struct sc_scb *get_scb(struct sc_softc *, int);
static void sc_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int sc_poll(struct sc_softc *, int, int);
static void sc_sched(struct sc_softc *);
void sc_done(struct sc_scb *);
int sc_intr(void *);
static void cxd1185_timeout(void *);

extern void sc_send(struct sc_scb *, int, int);
extern int scintr(void);
extern void scsi_hardreset(void);
extern int sc_busy(struct sc_softc *, int);
extern paddr_t kvtophys(vaddr_t);

static int sc_disconnect = IDT_DISCON;

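/*
 * Match: accept only devices attached under the name "sc".
 */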
int
cxd1185_match(device_t parent, cfdata_t cf, void *aux)
{
	struct hb_attach_args *ha = aux;

	if (strcmp(ha->ha_name, "sc"))
		return 0;

	return 1;
}

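/*
 * Attach: set up the scsipi adapter and channel, put all scbs on the
 * free list, reset the controller and hook up the interrupt handler.
 */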
void
cxd1185_attach(device_t parent, device_t self, void *aux)
{
	struct sc_softc *sc = device_private(self);
	struct hb_attach_args *ha = aux;
	struct sc_scb *scb;
	int i, intlevel;

	sc->sc_dev = self;

	intlevel = ha->ha_level;
	if (intlevel == -1) {
#if 0
		aprint_error(": interrupt level not configured\n");
		return;
#else
		aprint_normal(": interrupt level not configured; using");
		intlevel = 0;
#endif
	}
	aprint_normal(" level %d\n", intlevel);

	if (sc_idenr & 0x08)
		sc->scsi_1185AQ = 1;
	else
		sc->scsi_1185AQ = 0;

	sc->sc_adapter.adapt_dev = self;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = 7;
	sc->sc_adapter.adapt_max_periph = 1;
	sc->sc_adapter.adapt_ioctl = NULL;
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = sc_scsipi_request;

	memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = 8;
	sc->sc_channel.chan_nluns = 8;
	sc->sc_channel.chan_id = 7;

	TAILQ_INIT(&sc->ready_list);
	TAILQ_INIT(&sc->free_list);

	scb = sc->sc_scb;
	for (i = 0; i < 24; i++) {	/* XXX 24 */
		TAILQ_INSERT_TAIL(&sc->free_list, scb, chain);
		scb++;
	}

	cxd1185_init(sc);
	DELAY(100000);

	hb_intr_establish(intlevel, INTEN1_DMA, IPL_BIO, sc_intr, sc);

	config_found(self, &sc->sc_channel, scsiprint);
}

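/*
 * Mark all targets idle and hard-reset the SCSI bus.
 */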
void
cxd1185_init(struct sc_softc *sc)
{
	int i;

	for (i = 0; i < 8; i++)
		sc->inuse[i] = 0;

	scsi_hardreset();
}

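/*
 * Return an scb to the free list and wake up anyone waiting for one.
 */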
void
free_scb(struct sc_softc *sc, struct sc_scb *scb)
{
	int s;

	s = splbio();

	TAILQ_INSERT_HEAD(&sc->free_list, scb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (scb->chain.tqe_next == 0)
		wakeup(&sc->free_list);

	splx(s);
}

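/*
 * Get a free scb, sleeping unless XS_CTL_NOSLEEP is set.
 */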
struct sc_scb *
get_scb(struct sc_softc *sc, int flags)
{
	int s;
	struct sc_scb *scb;

	s = splbio();

	while ((scb = sc->free_list.tqh_first) == NULL &&
	    (flags & XS_CTL_NOSLEEP) == 0)
		tsleep(&sc->free_list, PRIBIO, "sc_scb", 0);
	if (scb) {
		TAILQ_REMOVE(&sc->free_list, scb, chain);
	}

	splx(s);
	return scb;
}

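/*
 * scsipi adapter request entry point: queue the transfer, kick the
 * scheduler, and poll for completion if XS_CTL_POLL is set.
 */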
void
sc_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct sc_softc *sc = device_private(chan->chan_adapter->adapt_dev);
	struct sc_scb *scb;
	int flags, s;
	int target;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;

		flags = xs->xs_control;
		if ((scb = get_scb(sc, flags)) == NULL)
			panic("%s: no scb", __func__);

		scb->xs = xs;
		scb->flags = 0;
		scb->sc_ctag = 0;
		scb->sc_coffset = 0;
		scb->istatus = 0;
		scb->tstatus = 0;
		scb->message = 0;
		memset(scb->msgbuf, 0, sizeof(scb->msgbuf));

		s = splbio();

		TAILQ_INSERT_TAIL(&sc->ready_list, scb, chain);
		sc_sched(sc);
		splx(s);

		if (flags & XS_CTL_POLL) {
			target = periph->periph_target;
			if (sc_poll(sc, target, xs->timeout)) {
				printf("sc: timeout (retry)\n");
				if (sc_poll(sc, target, xs->timeout)) {
					printf("sc: timeout\n");
				}
			}
			/* called during autoconfig only... */
			mips_dcache_wbinv_all();	/* Flush DCache */
		}
		return;
	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX Not supported. */
		return;
	}
}

/*
 * Used when interrupt driven I/O isn't allowed, e.g. during boot.
 */
int
sc_poll(struct sc_softc *sc, int chan, int count)
{
	volatile uint8_t *int_stat = (void *)INTST1;
	volatile uint8_t *int_clear = (void *)INTCLR1;

	while (sc_busy(sc, chan)) {
		if (*int_stat & INTST1_DMA) {
			*int_clear = INTST1_DMA;
			if (dmac_gstat & CH_INT(CH_SCSI)) {
				if (dmac_gstat & CH_MRQ(CH_SCSI)) {
					DELAY(50);
					if (dmac_gstat & CH_MRQ(CH_SCSI))
						printf("dma_poll\n");
				}
				DELAY(10);
				scintr();
			}
		}
		DELAY(1000);
		count--;
		if (count <= 0)
			return 1;
	}
	return 0;
}

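/*
 * Start queued transfers: skip targets that are already busy, build the
 * va->pa mapping table for DMA and hand each scb to sc_send().
 */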
void
sc_sched(struct sc_softc *sc)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int ie = 0;
	int flags;
	int chan, lun;
	struct sc_scb *scb, *nextscb;

	scb = sc->ready_list.tqh_first;
start:
	if (scb == NULL)
		return;

	xs = scb->xs;
	periph = xs->xs_periph;
	chan = periph->periph_target;
	flags = xs->xs_control;

	if (sc->inuse[chan]) {
		scb = scb->chain.tqe_next;
		goto start;
	}
	sc->inuse[chan] = 1;

	if (flags & XS_CTL_RESET)
		printf("SCSI RESET\n");

	lun = periph->periph_lun;

	scb->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
	scb->sc_ctrnscnt = xs->datalen;

	/* make va->pa mapping table for DMA */
	if (xs->datalen > 0) {
		uint32_t pn, pages, offset;
		int i;
		vaddr_t va;

#if 0
		memset(&sc->sc_map[chan], 0, sizeof(struct sc_map));
#endif

		va = (vaddr_t)xs->data;

		offset = va & PGOFSET;
		pages = (offset + xs->datalen + PAGE_SIZE - 1) >> PGSHIFT;
		if (pages >= NSCMAP)
			panic("sc_map: Too many pages");

		for (i = 0; i < pages; i++) {
			pn = kvtophys(va) >> PGSHIFT;
			sc->sc_map[chan].mp_addr[i] = pn;
			va += PAGE_SIZE;
		}

		sc->sc_map[chan].mp_offset = offset;
		sc->sc_map[chan].mp_pages = pages;
		scb->sc_map = &sc->sc_map[chan];
	}

	if ((flags & XS_CTL_POLL) == 0)
		ie = SCSI_INTEN;

	if (xs->data)
		scb->sc_cpoint = (void *)xs->data;
	else
		scb->sc_cpoint = scb->msgbuf;
	scb->scb_softc = sc;

	callout_reset(&scb->xs->xs_callout, hz * 10, cxd1185_timeout, scb);
	sc_send(scb, chan, ie);
	callout_stop(&scb->xs->xs_callout);

	nextscb = scb->chain.tqe_next;

	TAILQ_REMOVE(&sc->ready_list, scb, chain);

	scb = nextscb;

	goto start;
}

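/*
 * Transfer completion: map controller/target status to scsipi errors,
 * return the xfer and start the next queued transfer.
 */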
void
sc_done(struct sc_scb *scb)
{
	struct scsipi_xfer *xs = scb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	struct sc_softc *sc;

	sc = device_private(periph->periph_channel->chan_adapter->adapt_dev);
	xs->resid = 0;
	xs->status = 0;

	if (scb->istatus != INST_EP) {
		if (scb->istatus == (INST_EP|INST_TO))
			xs->error = XS_SELTIMEOUT;
		else {
			printf("SC(i): [istatus=0x%x, tstatus=0x%x]\n",
			    scb->istatus, scb->tstatus);
			xs->error = XS_DRIVER_STUFFUP;
		}
	}

	switch (scb->tstatus) {

	case TGST_GOOD:
		break;

	case TGST_CC:
		xs->status = SCSI_CHECK;
		if (xs->error == 0)
			xs->error = XS_BUSY;
		break;

	default:
		printf("SC(t): [istatus=0x%x, tstatus=0x%x]\n",
		    scb->istatus, scb->tstatus);
		break;
	}

	scsipi_done(xs);
	free_scb(sc, scb);
	sc->inuse[periph->periph_target] = 0;
	sc_sched(sc);
}

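/*
 * DMA interrupt handler; the actual SCSI interrupt work is done in
 * scintr().
 */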
int
sc_intr(void *v)
{
	/* struct sc_softc *sc = v; */
	volatile uint8_t *gsp = (uint8_t *)DMAC_GSTAT;
	u_int gstat = *gsp;
	int mrqb, i;

	if ((gstat & CH_INT(CH_SCSI)) == 0)
		return 0;

	/*
	 * When the DMA interrupt occurs, some data may remain
	 * untransferred; wait for the transfer to complete.
	 */
	mrqb = (gstat & CH_INT(CH_SCSI)) << 1;
	if (gstat & mrqb) {
		/*
		 * XXX SHOULD USE DELAY()
		 */
		for (i = 0; i < 50; i++)
			;
		if (*gsp & mrqb)
			printf("%s: MRQ\n", __func__);
	}
	scintr();

	return 1;
}

#if 0
/*
 * SCOP_RSENSE request
 */
void
scop_rsense(int intr, struct scsi *sc_param, int lun, int ie, int count,
    void *param)
{

	memset(sc_param, 0, sizeof(struct scsi));
	sc_param->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
	sc_param->sc_lun = lun;

	sc_param->sc_cpoint = (uint8_t *)param;
	sc_param->sc_ctrnscnt = count;

	/* sc_cdb */
	sc_param->sc_opcode = SCOP_RSENSE;
	sc_param->sc_count = count;

	sc_go(intr, sc_param, ie, sc_param);
}
#endif

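/*
 * Command timeout: report it; the transfer is not aborted (XXX).
 */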
void
cxd1185_timeout(void *arg)
{
	struct sc_scb *scb = arg;
	struct scsipi_xfer *xs = scb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	int chan;

	chan = periph->periph_target;

	printf("sc: timeout ch=%d\n", chan);

	/* XXX abort transfer and ... */
}