/*	$NetBSD: siop.c,v 1.93 2009/09/04 18:29:52 tsutsui Exp $	*/

/*
 * Copyright (c) 2000 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* SYM53c7/8xx PCI-SCSI I/O Processors driver */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.93 2009/09/04 18:29:52 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>
#include <sys/bus.h>

#include <dev/microcode/siop/siop.out>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsipi_all.h>

#include <dev/scsipi/scsiconf.h>

#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar_common.h>
#include <dev/ic/siopvar.h>

#include "opt_siop.h"

#ifndef DEBUG
#undef DEBUG
#endif
/*
#define SIOP_DEBUG
#define SIOP_DEBUG_DR
#define SIOP_DEBUG_INTR
#define SIOP_DEBUG_SCHED
#define DUMP_SCRIPT
*/

#define SIOP_STATS

#ifndef SIOP_DEFAULT_TARGET
#define SIOP_DEFAULT_TARGET 7
#endif

/* number of cmd descriptors per block */
#define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))

/* Number of scheduler slots (needs to match script) */
#define SIOP_NSLOTS 40
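
/*
 * Each scheduler slot is a two-word entry in the SCRIPTS program (slots are
 * addressed below as Ent_script_sched_slot0 + slot * 8 bytes, i.e.
 * slot * 2 words), so SIOP_NSLOTS bounds how many commands can be queued
 * to the chip at once before the channel has to be frozen.
 */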

void	siop_reset(struct siop_softc *);
void	siop_handle_reset(struct siop_softc *);
int	siop_handle_qtag_reject(struct siop_cmd *);
void	siop_scsicmd_end(struct siop_cmd *);
void	siop_unqueue(struct siop_softc *, int, int);
static void	siop_start(struct siop_softc *, struct siop_cmd *);
void	siop_timeout(void *);
int	siop_scsicmd(struct scsipi_xfer *);
void	siop_scsipi_request(struct scsipi_channel *,
		scsipi_adapter_req_t, void *);
void	siop_dump_script(struct siop_softc *);
void	siop_morecbd(struct siop_softc *);
struct siop_lunsw *siop_get_lunsw(struct siop_softc *);
void	siop_add_reselsw(struct siop_softc *, int);
void	siop_update_scntl3(struct siop_softc *,
		struct siop_common_target *);

#ifdef SIOP_STATS
static int siop_stat_intr = 0;
static int siop_stat_intr_shortxfer = 0;
static int siop_stat_intr_sdp = 0;
static int siop_stat_intr_saveoffset = 0;
static int siop_stat_intr_done = 0;
static int siop_stat_intr_xferdisc = 0;
static int siop_stat_intr_lunresel = 0;
static int siop_stat_intr_qfull = 0;
void siop_printstats(void);
#define INCSTAT(x) x++
#else
#define INCSTAT(x)
#endif
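
/*
 * The siop_script_* helpers below hide where the SCRIPTS program lives:
 * on chips with on-board RAM (SF_CHIP_RAM) it is accessed through
 * bus_space at sc_ramt/sc_ramh, otherwise it sits in host memory at
 * sc_script and needs explicit endian conversion and DMA map syncs.
 */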

static inline void siop_script_sync(struct siop_softc *, int);
static inline void
siop_script_sync(struct siop_softc *sc, int ops)
{

	if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, ops);
}

static inline uint32_t siop_script_read(struct siop_softc *, u_int);
static inline uint32_t
siop_script_read(struct siop_softc *sc, u_int offset)
{

	if (sc->sc_c.features & SF_CHIP_RAM) {
		return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    offset * 4);
	} else {
		return siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[offset]);
	}
}

static inline void siop_script_write(struct siop_softc *, u_int,
	uint32_t);
static inline void
siop_script_write(struct siop_softc *sc, u_int offset, uint32_t val)
{

	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    offset * 4, val);
	} else {
		sc->sc_c.sc_script[offset] = siop_htoc32(&sc->sc_c, val);
	}
}

void
siop_attach(struct siop_softc *sc)
{

	if (siop_common_attach(&sc->sc_c) != 0)
		return;

	TAILQ_INIT(&sc->free_list);
	TAILQ_INIT(&sc->cmds);
	TAILQ_INIT(&sc->lunsw_list);
	sc->sc_currschedslot = 0;
#ifdef SIOP_DEBUG
	printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
	    device_xname(sc->sc_c.sc_dev), (int)sizeof(siop_script),
	    (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
#endif

	sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
	sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;

	/* Do a bus reset, so that devices fall back to narrow/async */
	siop_resetbus(&sc->sc_c);
	/*
	 * siop_reset() will reset the chip, thus clearing pending interrupts
	 */
	siop_reset(sc);
#ifdef DUMP_SCRIPT
	siop_dump_script(sc);
#endif

	config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint);
}
184 void
185 siop_reset(struct siop_softc *sc)
187 int i, j;
188 struct siop_lunsw *lunsw;
190 siop_common_reset(&sc->sc_c);
192 /* copy and patch the script */
193 if (sc->sc_c.features & SF_CHIP_RAM) {
194 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
195 siop_script, __arraycount(siop_script));
196 for (j = 0; j < __arraycount(E_abs_msgin_Used); j++) {
197 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
198 E_abs_msgin_Used[j] * 4,
199 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
201 if (sc->sc_c.features & SF_CHIP_LED0) {
202 bus_space_write_region_4(sc->sc_c.sc_ramt,
203 sc->sc_c.sc_ramh,
204 Ent_led_on1, siop_led_on,
205 __arraycount(siop_led_on));
206 bus_space_write_region_4(sc->sc_c.sc_ramt,
207 sc->sc_c.sc_ramh,
208 Ent_led_on2, siop_led_on,
209 __arraycount(siop_led_on));
210 bus_space_write_region_4(sc->sc_c.sc_ramt,
211 sc->sc_c.sc_ramh,
212 Ent_led_off, siop_led_off,
213 __arraycount(siop_led_off));
215 } else {
216 for (j = 0; j < __arraycount(siop_script); j++) {
217 sc->sc_c.sc_script[j] =
218 siop_htoc32(&sc->sc_c, siop_script[j]);
220 for (j = 0; j < __arraycount(E_abs_msgin_Used); j++) {
221 sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
222 siop_htoc32(&sc->sc_c,
223 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
225 if (sc->sc_c.features & SF_CHIP_LED0) {
226 for (j = 0; j < __arraycount(siop_led_on); j++)
227 sc->sc_c.sc_script[
228 Ent_led_on1 / sizeof(siop_led_on[0]) + j
229 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
230 for (j = 0; j < __arraycount(siop_led_on); j++)
231 sc->sc_c.sc_script[
232 Ent_led_on2 / sizeof(siop_led_on[0]) + j
233 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
234 for (j = 0; j < __arraycount(siop_led_off); j++)
235 sc->sc_c.sc_script[
236 Ent_led_off / sizeof(siop_led_off[0]) + j
237 ] = siop_htoc32(&sc->sc_c, siop_led_off[j]);
240 sc->script_free_lo = __arraycount(siop_script);
241 sc->script_free_hi = sc->sc_c.ram_size / 4;
242 sc->sc_ntargets = 0;
244 /* free used and unused lun switches */
245 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
246 #ifdef SIOP_DEBUG
247 printf("%s: free lunsw at offset %d\n",
248 device_xname(sc->sc_c.sc_dev), lunsw->lunsw_off);
249 #endif
250 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
251 free(lunsw, M_DEVBUF);
253 TAILQ_INIT(&sc->lunsw_list);
254 /* restore reselect switch */
255 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
256 struct siop_target *target;
257 if (sc->sc_c.targets[i] == NULL)
258 continue;
259 #ifdef SIOP_DEBUG
260 printf("%s: restore sw for target %d\n",
261 device_xname(sc->sc_c.sc_dev), i);
262 #endif
263 target = (struct siop_target *)sc->sc_c.targets[i];
264 free(target->lunsw, M_DEVBUF);
265 target->lunsw = siop_get_lunsw(sc);
266 if (target->lunsw == NULL) {
267 aprint_error_dev(sc->sc_c.sc_dev,
268 "can't alloc lunsw for target %d\n", i);
269 break;
271 siop_add_reselsw(sc, i);
274 /* start script */
275 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
276 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
277 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
279 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
280 sc->sc_c.sc_scriptaddr + Ent_reselect);

#if 0
#define CALL_SCRIPT(ent) do {						\
	printf ("start script DSA 0x%lx DSP 0x%lx\n",			\
	    siop_cmd->cmd_c.dsa,					\
	    sc->sc_c.sc_scriptaddr + ent);				\
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,		\
	    SIOP_DSP, sc->sc_c.sc_scriptaddr + ent);			\
} while (/* CONSTCOND */0)
#else
#define CALL_SCRIPT(ent) do {						\
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,		\
	    SIOP_DSP, sc->sc_c.sc_scriptaddr + ent);			\
} while (/* CONSTCOND */0)
#endif
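
/*
 * CALL_SCRIPT(ent) (re)starts the on-chip SCRIPTS processor: writing the
 * bus address of a script entry point (sc_scriptaddr + Ent_*) to the DSP
 * register makes the chip resume instruction fetches there.  The #if 0
 * variant above just logs the DSA/DSP values first.
 */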
299 siop_intr(void *v)
301 struct siop_softc *sc = v;
302 struct siop_target *siop_target;
303 struct siop_cmd *siop_cmd;
304 struct siop_lun *siop_lun;
305 struct scsipi_xfer *xs;
306 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
307 uint32_t irqcode;
308 int need_reset = 0;
309 int offset, target, lun, tag;
310 bus_addr_t dsa;
311 struct siop_cbd *cbdp;
312 int freetarget = 0;
313 int restart = 0;
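	/*
	 * ISTAT can be read while the SCRIPTS processor is running: INTF
	 * flags an interrupt-on-the-fly, DIP says a DMA interrupt is
	 * pending (details in DSTAT) and SIP says a SCSI interrupt is
	 * pending (details in SIST0/1 and SSTAT1), which is what the
	 * checks below sort out.
	 */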
315 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
316 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
317 return 0;
318 INCSTAT(siop_stat_intr);
319 if (istat & ISTAT_INTF) {
320 printf("INTRF\n");
321 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
322 SIOP_ISTAT, ISTAT_INTF);
324 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
325 (ISTAT_DIP | ISTAT_ABRT)) {
326 /* clear abort */
327 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
328 SIOP_ISTAT, 0);
330 /* use DSA to find the current siop_cmd */
331 siop_cmd = NULL;
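	/*
	 * DSA points at the siop_xfer table the SCRIPTS code was using.
	 * Each siop_cbd owns one PAGE_SIZE DMA segment of siop_xfer slots,
	 * so a range check against each block plus a division by
	 * sizeof(struct siop_xfer) recovers the owning siop_cmd.
	 */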
332 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
333 TAILQ_FOREACH(cbdp, &sc->cmds, next) {
334 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
335 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
336 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
337 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
338 siop_table_sync(siop_cmd,
339 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
340 break;
343 if (siop_cmd) {
344 xs = siop_cmd->cmd_c.xs;
345 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
346 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
347 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
348 tag = siop_cmd->cmd_c.tag;
349 siop_lun = siop_target->siop_lun[lun];
350 #ifdef DIAGNOSTIC
351 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
352 printf("siop_cmd (lun %d) for DSA 0x%x "
353 "not active (%d)\n", lun, (u_int)dsa,
354 siop_cmd->cmd_c.status);
355 xs = NULL;
356 siop_target = NULL;
357 target = -1;
358 lun = -1;
359 tag = -1;
360 siop_lun = NULL;
361 siop_cmd = NULL;
362 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
363 printf("siop_cmd (lun %d tag %d) not in siop_lun "
364 "active (%p != %p)\n", lun, tag, siop_cmd,
365 siop_lun->siop_tag[tag].active);
367 #endif
368 } else {
369 xs = NULL;
370 siop_target = NULL;
371 target = -1;
372 lun = -1;
373 tag = -1;
374 siop_lun = NULL;
376 if (istat & ISTAT_DIP) {
377 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
378 SIOP_DSTAT);
379 if (dstat & DSTAT_ABRT) {
380 /* was probably generated by a bus reset IOCTL */
381 if ((dstat & DSTAT_DFE) == 0)
382 siop_clearfifo(&sc->sc_c);
383 goto reset;
385 if (dstat & DSTAT_SSI) {
386 printf("single step dsp 0x%08x dsa 0x08%x\n",
387 (int)(bus_space_read_4(sc->sc_c.sc_rt,
388 sc->sc_c.sc_rh, SIOP_DSP) -
389 sc->sc_c.sc_scriptaddr),
390 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
391 SIOP_DSA));
392 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
393 (istat & ISTAT_SIP) == 0) {
394 bus_space_write_1(sc->sc_c.sc_rt,
395 sc->sc_c.sc_rh, SIOP_DCNTL,
396 bus_space_read_1(sc->sc_c.sc_rt,
397 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
399 return 1;
402 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
403 printf("DMA IRQ:");
404 if (dstat & DSTAT_IID)
405 printf(" Illegal instruction");
406 if (dstat & DSTAT_BF)
407 printf(" bus fault");
408 if (dstat & DSTAT_MDPE)
409 printf(" parity");
410 if (dstat & DSTAT_DFE)
411 printf(" DMA fifo empty");
412 else
413 siop_clearfifo(&sc->sc_c);
414 printf(", DSP=0x%x DSA=0x%x: ",
415 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
416 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
417 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
418 if (siop_cmd)
419 printf("last msg_in=0x%x status=0x%x\n",
420 siop_cmd->cmd_tables->msg_in[0],
421 siop_ctoh32(&sc->sc_c,
422 siop_cmd->cmd_tables->status));
423 else
424 aprint_error_dev(sc->sc_c.sc_dev,
425 "current DSA invalid\n");
426 need_reset = 1;
429 if (istat & ISTAT_SIP) {
430 if (istat & ISTAT_DIP)
431 delay(10);
433 * Can't read sist0 & sist1 independently, or we have to
434 * insert delay
436 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
437 SIOP_SIST0);
438 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
439 SIOP_SSTAT1);
440 #ifdef SIOP_DEBUG_INTR
441 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
442 "DSA=0x%x DSP=0x%lx\n", sist,
443 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
444 SIOP_SSTAT1),
445 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
446 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
447 SIOP_DSP) -
448 sc->sc_c.sc_scriptaddr));
449 #endif
450 if (sist & SIST0_RST) {
451 siop_handle_reset(sc);
452 /* no table to flush here */
453 return 1;
455 if (sist & SIST0_SGE) {
456 if (siop_cmd)
457 scsipi_printaddr(xs->xs_periph);
458 else
459 printf("%s:", device_xname(sc->sc_c.sc_dev));
460 printf("scsi gross error\n");
461 goto reset;
463 if ((sist & SIST0_MA) && need_reset == 0) {
464 if (siop_cmd) {
465 int scratcha0;
466 dstat = bus_space_read_1(sc->sc_c.sc_rt,
467 sc->sc_c.sc_rh, SIOP_DSTAT);
469 * first restore DSA, in case we were in a S/G
470 * operation.
472 bus_space_write_4(sc->sc_c.sc_rt,
473 sc->sc_c.sc_rh,
474 SIOP_DSA, siop_cmd->cmd_c.dsa);
475 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
476 sc->sc_c.sc_rh, SIOP_SCRATCHA);
477 switch (sstat1 & SSTAT1_PHASE_MASK) {
478 case SSTAT1_PHASE_STATUS:
480 * previous phase may be aborted for any reason
481 * ( for example, the target has less data to
482 * transfer than requested). Compute resid and
483 * just go to status, the command should
484 * terminate.
486 INCSTAT(siop_stat_intr_shortxfer);
487 if (scratcha0 & A_flag_data)
488 siop_ma(&siop_cmd->cmd_c);
489 else if ((dstat & DSTAT_DFE) == 0)
490 siop_clearfifo(&sc->sc_c);
491 CALL_SCRIPT(Ent_status);
492 return 1;
493 case SSTAT1_PHASE_MSGIN:
495 * target may be ready to disconnect
496 * Compute resid which would be used later
497 * if a save data pointer is needed.
499 INCSTAT(siop_stat_intr_xferdisc);
500 if (scratcha0 & A_flag_data)
501 siop_ma(&siop_cmd->cmd_c);
502 else if ((dstat & DSTAT_DFE) == 0)
503 siop_clearfifo(&sc->sc_c);
504 bus_space_write_1(sc->sc_c.sc_rt,
505 sc->sc_c.sc_rh, SIOP_SCRATCHA,
506 scratcha0 & ~A_flag_data);
507 CALL_SCRIPT(Ent_msgin);
508 return 1;
510 aprint_error_dev(sc->sc_c.sc_dev,
511 "unexpected phase mismatch %d\n",
512 sstat1 & SSTAT1_PHASE_MASK);
513 } else {
514 aprint_error_dev(sc->sc_c.sc_dev,
515 "phase mismatch without command\n");
517 need_reset = 1;
519 if (sist & SIST0_PAR) {
520 /* parity error, reset */
521 if (siop_cmd)
522 scsipi_printaddr(xs->xs_periph);
523 else
524 printf("%s:", device_xname(sc->sc_c.sc_dev));
525 printf("parity error\n");
526 goto reset;
528 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
529 /* selection time out, assume there's no device here */
530 if (siop_cmd) {
531 siop_cmd->cmd_c.status = CMDST_DONE;
532 xs->error = XS_SELTIMEOUT;
533 freetarget = 1;
534 goto end;
535 } else {
536 aprint_error_dev(sc->sc_c.sc_dev,
537 "selection timeout without "
538 "command\n");
539 need_reset = 1;
542 if (sist & SIST0_UDC) {
544 * unexpected disconnect. Usually the target signals
545 * a fatal condition this way. Attempt to get sense.
547 if (siop_cmd) {
548 siop_cmd->cmd_tables->status =
549 siop_htoc32(&sc->sc_c, SCSI_CHECK);
550 goto end;
552 aprint_error_dev(sc->sc_c.sc_dev,
553 "unexpected disconnect without "
554 "command\n");
555 goto reset;
557 if (sist & (SIST1_SBMC << 8)) {
558 /* SCSI bus mode change */
559 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
560 goto reset;
561 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
563 * we have a script interrupt, it will
564 * restart the script.
566 goto scintr;
			/*
			 * else we have to restart it ourselves, at the
			 * interrupted instruction.
			 */
572 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
573 SIOP_DSP,
574 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
575 SIOP_DSP) - 8);
576 return 1;
578 /* Else it's an unhandled exception (for now). */
579 aprint_error_dev(sc->sc_c.sc_dev,
580 "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
581 "DSA=0x%x DSP=0x%x\n", sist,
582 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
583 SIOP_SSTAT1),
584 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
585 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
586 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
587 if (siop_cmd) {
588 siop_cmd->cmd_c.status = CMDST_DONE;
589 xs->error = XS_SELTIMEOUT;
590 goto end;
592 need_reset = 1;
594 if (need_reset) {
595 reset:
596 /* fatal error, reset the bus */
597 siop_resetbus(&sc->sc_c);
598 /* no table to flush here */
599 return 1;
602 scintr:
603 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
604 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
605 SIOP_DSPS);
606 #ifdef SIOP_DEBUG_INTR
607 printf("script interrupt 0x%x\n", irqcode);
608 #endif
610 * no command, or an inactive command is only valid for a
611 * reselect interrupt
613 if ((irqcode & 0x80) == 0) {
614 if (siop_cmd == NULL) {
615 aprint_error_dev(sc->sc_c.sc_dev,
616 "script interrupt (0x%x) with "
617 "invalid DSA !!!\n",
618 irqcode);
619 goto reset;
621 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
622 aprint_error_dev(sc->sc_c.sc_dev,
623 "command with invalid status "
624 "(IRQ code 0x%x current status %d) !\n",
625 irqcode, siop_cmd->cmd_c.status);
626 xs = NULL;
629 switch(irqcode) {
630 case A_int_err:
631 printf("error, DSP=0x%x\n",
632 (int)(bus_space_read_4(sc->sc_c.sc_rt,
633 sc->sc_c.sc_rh, SIOP_DSP) -
634 sc->sc_c.sc_scriptaddr));
635 if (xs) {
636 xs->error = XS_SELTIMEOUT;
637 goto end;
638 } else {
639 goto reset;
641 case A_int_reseltarg:
642 aprint_error_dev(sc->sc_c.sc_dev,
643 "reselect with invalid target\n");
644 goto reset;
645 case A_int_resellun:
646 INCSTAT(siop_stat_intr_lunresel);
647 target = bus_space_read_1(sc->sc_c.sc_rt,
648 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
649 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
650 SIOP_SCRATCHA + 1);
651 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
652 SIOP_SCRATCHA + 2);
653 siop_target =
654 (struct siop_target *)sc->sc_c.targets[target];
655 if (siop_target == NULL) {
656 printf("%s: reselect with invalid target %d\n",
657 device_xname(sc->sc_c.sc_dev), target);
658 goto reset;
660 siop_lun = siop_target->siop_lun[lun];
661 if (siop_lun == NULL) {
662 printf("%s: target %d reselect with invalid "
663 "lun %d\n", device_xname(sc->sc_c.sc_dev),
664 target, lun);
665 goto reset;
667 if (siop_lun->siop_tag[tag].active == NULL) {
668 printf("%s: target %d lun %d tag %d reselect "
669 "without command\n",
670 device_xname(sc->sc_c.sc_dev),
671 target, lun, tag);
672 goto reset;
674 siop_cmd = siop_lun->siop_tag[tag].active;
675 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
676 SIOP_DSP, siop_cmd->cmd_c.dsa +
677 sizeof(struct siop_common_xfer) +
678 Ent_ldsa_reload_dsa);
679 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
680 return 1;
681 case A_int_reseltag:
682 printf("%s: reselect with invalid tag\n",
683 device_xname(sc->sc_c.sc_dev));
684 goto reset;
685 case A_int_msgin:
687 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
688 sc->sc_c.sc_rh, SIOP_SFBR);
690 if (msgin == MSG_MESSAGE_REJECT) {
691 int msg, extmsg;
692 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
694 * message was part of a identify +
695 * something else. Identify shouldn't
696 * have been rejected.
698 msg =
699 siop_cmd->cmd_tables->msg_out[1];
700 extmsg =
701 siop_cmd->cmd_tables->msg_out[3];
702 } else {
703 msg = siop_cmd->cmd_tables->msg_out[0];
704 extmsg =
705 siop_cmd->cmd_tables->msg_out[2];
707 if (msg == MSG_MESSAGE_REJECT) {
708 /* MSG_REJECT for a MSG_REJECT !*/
709 if (xs)
710 scsipi_printaddr(xs->xs_periph);
711 else
712 printf("%s: ", device_xname(
713 sc->sc_c.sc_dev));
714 printf("our reject message was "
715 "rejected\n");
716 goto reset;
718 if (msg == MSG_EXTENDED &&
719 extmsg == MSG_EXT_WDTR) {
720 /* WDTR rejected, initiate sync */
721 if ((siop_target->target_c.flags &
722 TARF_SYNC) == 0) {
723 siop_target->target_c.status =
724 TARST_OK;
725 siop_update_xfer_mode(&sc->sc_c,
726 target);
727 /* no table to flush here */
728 CALL_SCRIPT(Ent_msgin_ack);
729 return 1;
731 siop_target->target_c.status =
732 TARST_SYNC_NEG;
733 siop_sdtr_msg(&siop_cmd->cmd_c, 0,
734 sc->sc_c.st_minsync,
735 sc->sc_c.maxoff);
736 siop_table_sync(siop_cmd,
737 BUS_DMASYNC_PREREAD |
738 BUS_DMASYNC_PREWRITE);
739 CALL_SCRIPT(Ent_send_msgout);
740 return 1;
741 } else if (msg == MSG_EXTENDED &&
742 extmsg == MSG_EXT_SDTR) {
743 /* sync rejected */
744 siop_target->target_c.offset = 0;
745 siop_target->target_c.period = 0;
746 siop_target->target_c.status = TARST_OK;
747 siop_update_xfer_mode(&sc->sc_c,
748 target);
749 /* no table to flush here */
750 CALL_SCRIPT(Ent_msgin_ack);
751 return 1;
752 } else if (msg == MSG_SIMPLE_Q_TAG ||
753 msg == MSG_HEAD_OF_Q_TAG ||
754 msg == MSG_ORDERED_Q_TAG) {
755 if (siop_handle_qtag_reject(
756 siop_cmd) == -1)
757 goto reset;
758 CALL_SCRIPT(Ent_msgin_ack);
759 return 1;
761 if (xs)
762 scsipi_printaddr(xs->xs_periph);
763 else
764 printf("%s: ",
765 device_xname(sc->sc_c.sc_dev));
766 if (msg == MSG_EXTENDED) {
767 printf("scsi message reject, extended "
768 "message sent was 0x%x\n", extmsg);
769 } else {
770 printf("scsi message reject, message "
771 "sent was 0x%x\n", msg);
773 /* no table to flush here */
774 CALL_SCRIPT(Ent_msgin_ack);
775 return 1;
777 if (msgin == MSG_IGN_WIDE_RESIDUE) {
778 /* use the extmsgdata table to get the second byte */
779 siop_cmd->cmd_tables->t_extmsgdata.count =
780 siop_htoc32(&sc->sc_c, 1);
781 siop_table_sync(siop_cmd,
782 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
783 CALL_SCRIPT(Ent_get_extmsgdata);
784 return 1;
786 if (xs)
787 scsipi_printaddr(xs->xs_periph);
788 else
789 printf("%s: ", device_xname(sc->sc_c.sc_dev));
790 printf("unhandled message 0x%x\n",
791 siop_cmd->cmd_tables->msg_in[0]);
792 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
793 siop_cmd->cmd_tables->t_msgout.count =
794 siop_htoc32(&sc->sc_c, 1);
795 siop_table_sync(siop_cmd,
796 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
797 CALL_SCRIPT(Ent_send_msgout);
798 return 1;
800 case A_int_extmsgin:
801 #ifdef SIOP_DEBUG_INTR
802 printf("extended message: msg 0x%x len %d\n",
803 siop_cmd->cmd_tables->msg_in[2],
804 siop_cmd->cmd_tables->msg_in[1]);
805 #endif
806 if (siop_cmd->cmd_tables->msg_in[1] >
807 sizeof(siop_cmd->cmd_tables->msg_in) - 2)
808 aprint_error_dev(sc->sc_c.sc_dev,
809 "extended message too big (%d)\n",
810 siop_cmd->cmd_tables->msg_in[1]);
811 siop_cmd->cmd_tables->t_extmsgdata.count =
812 siop_htoc32(&sc->sc_c,
813 siop_cmd->cmd_tables->msg_in[1] - 1);
814 siop_table_sync(siop_cmd,
815 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
816 CALL_SCRIPT(Ent_get_extmsgdata);
817 return 1;
818 case A_int_extmsgdata:
819 #ifdef SIOP_DEBUG_INTR
821 int i;
822 printf("extended message: 0x%x, data:",
823 siop_cmd->cmd_tables->msg_in[2]);
824 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
825 i++)
826 printf(" 0x%x",
827 siop_cmd->cmd_tables->msg_in[i]);
828 printf("\n");
830 #endif
831 if (siop_cmd->cmd_tables->msg_in[0] ==
832 MSG_IGN_WIDE_RESIDUE) {
833 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
834 if (siop_cmd->cmd_tables->msg_in[3] != 1)
835 printf("MSG_IGN_WIDE_RESIDUE: "
836 "bad len %d\n",
837 siop_cmd->cmd_tables->msg_in[3]);
838 switch (siop_iwr(&siop_cmd->cmd_c)) {
839 case SIOP_NEG_MSGOUT:
840 siop_table_sync(siop_cmd,
841 BUS_DMASYNC_PREREAD |
842 BUS_DMASYNC_PREWRITE);
843 CALL_SCRIPT(Ent_send_msgout);
844 return(1);
845 case SIOP_NEG_ACK:
846 CALL_SCRIPT(Ent_msgin_ack);
847 return(1);
848 default:
849 panic("invalid retval from "
850 "siop_iwr()");
852 return(1);
854 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
855 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
856 case SIOP_NEG_MSGOUT:
857 siop_update_scntl3(sc,
858 siop_cmd->cmd_c.siop_target);
859 siop_table_sync(siop_cmd,
860 BUS_DMASYNC_PREREAD |
861 BUS_DMASYNC_PREWRITE);
862 CALL_SCRIPT(Ent_send_msgout);
863 return(1);
864 case SIOP_NEG_ACK:
865 siop_update_scntl3(sc,
866 siop_cmd->cmd_c.siop_target);
867 CALL_SCRIPT(Ent_msgin_ack);
868 return(1);
869 default:
870 panic("invalid retval from "
871 "siop_wdtr_neg()");
873 return(1);
875 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
876 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
877 case SIOP_NEG_MSGOUT:
878 siop_update_scntl3(sc,
879 siop_cmd->cmd_c.siop_target);
880 siop_table_sync(siop_cmd,
881 BUS_DMASYNC_PREREAD |
882 BUS_DMASYNC_PREWRITE);
883 CALL_SCRIPT(Ent_send_msgout);
884 return(1);
885 case SIOP_NEG_ACK:
886 siop_update_scntl3(sc,
887 siop_cmd->cmd_c.siop_target);
888 CALL_SCRIPT(Ent_msgin_ack);
889 return(1);
890 default:
891 panic("invalid retval from "
892 "siop_wdtr_neg()");
894 return(1);
896 /* send a message reject */
897 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
898 siop_cmd->cmd_tables->t_msgout.count =
899 siop_htoc32(&sc->sc_c, 1);
900 siop_table_sync(siop_cmd,
901 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
902 CALL_SCRIPT(Ent_send_msgout);
903 return 1;
904 case A_int_disc:
905 INCSTAT(siop_stat_intr_sdp);
906 offset = bus_space_read_1(sc->sc_c.sc_rt,
907 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
908 #ifdef SIOP_DEBUG_DR
909 printf("disconnect offset %d\n", offset);
910 #endif
911 siop_sdp(&siop_cmd->cmd_c, offset);
912 /* we start again with no offset */
913 siop_cmd->saved_offset = SIOP_NOOFFSET;
914 siop_table_sync(siop_cmd,
915 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
916 CALL_SCRIPT(Ent_script_sched);
917 return 1;
918 case A_int_saveoffset:
919 INCSTAT(siop_stat_intr_saveoffset);
920 offset = bus_space_read_1(sc->sc_c.sc_rt,
921 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
922 #ifdef SIOP_DEBUG_DR
923 printf("saveoffset offset %d\n", offset);
924 #endif
925 siop_cmd->saved_offset = offset;
926 CALL_SCRIPT(Ent_script_sched);
927 return 1;
928 case A_int_resfail:
929 printf("reselect failed\n");
930 CALL_SCRIPT(Ent_script_sched);
931 return 1;
932 case A_int_done:
933 if (xs == NULL) {
934 printf("%s: done without command, DSA=0x%lx\n",
935 device_xname(sc->sc_c.sc_dev),
936 (u_long)siop_cmd->cmd_c.dsa);
937 siop_cmd->cmd_c.status = CMDST_FREE;
938 CALL_SCRIPT(Ent_script_sched);
939 return 1;
941 #ifdef SIOP_DEBUG_INTR
942 printf("done, DSA=0x%lx target id 0x%x last msg "
943 "in=0x%x status=0x%x\n",
944 (u_long)siop_cmd->cmd_c.dsa,
945 siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->id),
946 siop_cmd->cmd_tables->msg_in[0],
947 siop_ctoh32(&sc->sc_c,
948 siop_cmd->cmd_tables->status));
949 #endif
950 INCSTAT(siop_stat_intr_done);
951 /* update resid. */
952 offset = bus_space_read_1(sc->sc_c.sc_rt,
953 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
955 * if we got a disconnect between the last data phase
956 * and the status phase, offset will be 0. In this
957 * case, siop_cmd->saved_offset will have the proper
958 * value if it got updated by the controller
960 if (offset == 0 &&
961 siop_cmd->saved_offset != SIOP_NOOFFSET)
962 offset = siop_cmd->saved_offset;
963 siop_update_resid(&siop_cmd->cmd_c, offset);
964 siop_cmd->cmd_c.status = CMDST_DONE;
965 goto end;
966 default:
967 printf("unknown irqcode %x\n", irqcode);
968 if (xs) {
969 xs->error = XS_SELTIMEOUT;
970 goto end;
972 goto reset;
974 return 1;
	/* We just shouldn't get there */
977 panic("siop_intr: I shouldn't be there !");
979 end:
981 * restart the script now if command completed properly
982 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
983 * queue
985 xs->status = siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->status);
986 if (xs->status == SCSI_OK)
987 CALL_SCRIPT(Ent_script_sched);
988 else
989 restart = 1;
990 siop_lun->siop_tag[tag].active = NULL;
991 siop_scsicmd_end(siop_cmd);
992 if (freetarget && siop_target->target_c.status == TARST_PROBING)
993 siop_del_dev(sc, target, lun);
994 if (restart)
995 CALL_SCRIPT(Ent_script_sched);
996 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
997 /* a command terminated, so we have free slots now */
998 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
999 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1002 return 1;
1005 void
1006 siop_scsicmd_end(struct siop_cmd *siop_cmd)
1008 struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
1009 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1011 switch(xs->status) {
1012 case SCSI_OK:
1013 xs->error = XS_NOERROR;
1014 break;
1015 case SCSI_BUSY:
1016 xs->error = XS_BUSY;
1017 break;
1018 case SCSI_CHECK:
1019 xs->error = XS_BUSY;
1020 /* remove commands in the queue and scheduler */
1021 siop_unqueue(sc, xs->xs_periph->periph_target,
1022 xs->xs_periph->periph_lun);
1023 break;
1024 case SCSI_QUEUE_FULL:
1025 INCSTAT(siop_stat_intr_qfull);
1026 #ifdef SIOP_DEBUG
1027 printf("%s:%d:%d: queue full (tag %d)\n",
1028 device_xname(sc->sc_c.sc_dev),
1029 xs->xs_periph->periph_target,
1030 xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
1031 #endif
1032 xs->error = XS_BUSY;
1033 break;
1034 case SCSI_SIOP_NOCHECK:
1036 * don't check status, xs->error is already valid
1038 break;
1039 case SCSI_SIOP_NOSTATUS:
1041 * the status byte was not updated, cmd was
1042 * aborted
1044 xs->error = XS_SELTIMEOUT;
1045 break;
1046 default:
1047 scsipi_printaddr(xs->xs_periph);
1048 printf("invalid status code %d\n", xs->status);
1049 xs->error = XS_DRIVER_STUFFUP;
1051 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1052 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data,
1053 0, siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1054 (xs->xs_control & XS_CTL_DATA_IN) ?
1055 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1056 bus_dmamap_unload(sc->sc_c.sc_dmat,
1057 siop_cmd->cmd_c.dmamap_data);
1059 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1060 if ((xs->xs_control & XS_CTL_POLL) == 0)
1061 callout_stop(&xs->xs_callout);
1062 siop_cmd->cmd_c.status = CMDST_FREE;
1063 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1064 #if 0
1065 if (xs->resid != 0)
1066 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1067 #endif
1068 scsipi_done(xs);
1071 void
1072 siop_unqueue(struct siop_softc *sc, int target, int lun)
1074 int slot, tag;
1075 struct siop_cmd *siop_cmd;
1076 struct siop_lun *siop_lun =
1077 ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1079 /* first make sure to read valid data */
1080 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1082 for (tag = 1; tag < SIOP_NTAG; tag++) {
1083 /* look for commands in the scheduler, not yet started */
1084 if (siop_lun->siop_tag[tag].active == NULL)
1085 continue;
1086 siop_cmd = siop_lun->siop_tag[tag].active;
1087 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1088 if (siop_script_read(sc,
1089 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1090 siop_cmd->cmd_c.dsa +
1091 sizeof(struct siop_common_xfer) +
1092 Ent_ldsa_select)
1093 break;
1095 if (slot > sc->sc_currschedslot)
1096 continue; /* didn't find it */
1097 if (siop_script_read(sc,
1098 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1099 continue; /* already started */
1100 /* clear the slot */
1101 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1102 0x80000000);
1103 /* ask to requeue */
1104 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1105 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1106 siop_lun->siop_tag[tag].active = NULL;
1107 siop_scsicmd_end(siop_cmd);
1109 /* update sc_currschedslot */
1110 sc->sc_currschedslot = 0;
1111 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1112 if (siop_script_read(sc,
1113 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1114 sc->sc_currschedslot = slot;
/*
 * handle a rejected queue tag message: the command will run untagged,
 * so we have to adjust the reselect script.
 */
int
1123 siop_handle_qtag_reject(struct siop_cmd *siop_cmd)
1125 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1126 int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1127 int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1128 int tag = siop_cmd->cmd_tables->msg_out[2];
1129 struct siop_lun *siop_lun =
1130 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1132 #ifdef SIOP_DEBUG
1133 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1134 device_xname(sc->sc_c.sc_dev), target, lun, tag,
1135 siop_cmd->cmd_c.tag,
1136 siop_cmd->cmd_c.status);
1137 #endif
1139 if (siop_lun->siop_tag[0].active != NULL) {
1140 printf("%s: untagged command already running for target %d "
1141 "lun %d (status %d)\n", device_xname(sc->sc_c.sc_dev),
1142 target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1143 return -1;
1145 /* clear tag slot */
1146 siop_lun->siop_tag[tag].active = NULL;
1147 /* add command to non-tagged slot */
1148 siop_lun->siop_tag[0].active = siop_cmd;
1149 siop_cmd->cmd_c.tag = 0;
1150 /* adjust reselect script if there is one */
1151 if (siop_lun->siop_tag[0].reseloff > 0) {
1152 siop_script_write(sc,
1153 siop_lun->siop_tag[0].reseloff + 1,
1154 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1155 Ent_ldsa_reload_dsa);
1156 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1158 return 0;
/*
 * handle a bus reset: reset chip, unqueue all active commands, free all
 * target structs and report lossage to the upper layer.
 * As the upper layer may requeue immediately, we have to first store
 * all active commands in a temporary queue.
 */
1167 void
1168 siop_handle_reset(struct siop_softc *sc)
1170 struct siop_cmd *siop_cmd;
1171 struct siop_lun *siop_lun;
1172 int target, lun, tag;
1175 * scsi bus reset. reset the chip and restart
1176 * the queue. Need to clean up all active commands
1178 printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev));
1179 /* stop, reset and restart the chip */
1180 siop_reset(sc);
1181 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1182 /* chip has been reset, all slots are free now */
1183 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1184 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1187 * Process all commands: first commands being executed
1189 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1190 target++) {
1191 if (sc->sc_c.targets[target] == NULL)
1192 continue;
1193 for (lun = 0; lun < 8; lun++) {
1194 struct siop_target *siop_target =
1195 (struct siop_target *)sc->sc_c.targets[target];
1196 siop_lun = siop_target->siop_lun[lun];
1197 if (siop_lun == NULL)
1198 continue;
1199 for (tag = 0; tag <
1200 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1201 SIOP_NTAG : 1);
1202 tag++) {
1203 siop_cmd = siop_lun->siop_tag[tag].active;
1204 if (siop_cmd == NULL)
1205 continue;
1206 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1207 printf("command with tag id %d reset\n", tag);
1208 siop_cmd->cmd_c.xs->error =
1209 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1210 XS_TIMEOUT : XS_RESET;
1211 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1212 siop_lun->siop_tag[tag].active = NULL;
1213 siop_cmd->cmd_c.status = CMDST_DONE;
1214 siop_scsicmd_end(siop_cmd);
1217 sc->sc_c.targets[target]->status = TARST_ASYNC;
1218 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1219 sc->sc_c.targets[target]->period =
1220 sc->sc_c.targets[target]->offset = 0;
1221 siop_update_xfer_mode(&sc->sc_c, target);
1224 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1227 void
1228 siop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1229 void *arg)
1231 struct scsipi_xfer *xs;
1232 struct scsipi_periph *periph;
1233 struct siop_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1234 struct siop_cmd *siop_cmd;
1235 struct siop_target *siop_target;
1236 int s, error, i;
1237 int target;
1238 int lun;
1240 switch (req) {
1241 case ADAPTER_REQ_RUN_XFER:
1242 xs = arg;
1243 periph = xs->xs_periph;
1244 target = periph->periph_target;
1245 lun = periph->periph_lun;
1247 s = splbio();
1248 #ifdef SIOP_DEBUG_SCHED
1249 printf("starting cmd for %d:%d\n", target, lun);
1250 #endif
1251 siop_cmd = TAILQ_FIRST(&sc->free_list);
1252 if (siop_cmd == NULL) {
1253 xs->error = XS_RESOURCE_SHORTAGE;
1254 scsipi_done(xs);
1255 splx(s);
1256 return;
1258 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1259 #ifdef DIAGNOSTIC
1260 if (siop_cmd->cmd_c.status != CMDST_FREE)
1261 panic("siop_scsicmd: new cmd not free");
1262 #endif
1263 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1264 if (siop_target == NULL) {
1265 #ifdef SIOP_DEBUG
1266 printf("%s: alloc siop_target for target %d\n",
1267 device_xname(sc->sc_c.sc_dev), target);
1268 #endif
1269 sc->sc_c.targets[target] =
1270 malloc(sizeof(struct siop_target),
1271 M_DEVBUF, M_NOWAIT|M_ZERO);
1272 if (sc->sc_c.targets[target] == NULL) {
1273 aprint_error_dev(sc->sc_c.sc_dev,
1274 "can't malloc memory for "
1275 "target %d\n", target);
1276 xs->error = XS_RESOURCE_SHORTAGE;
1277 scsipi_done(xs);
1278 splx(s);
1279 return;
1281 siop_target =
1282 (struct siop_target *)sc->sc_c.targets[target];
1283 siop_target->target_c.status = TARST_PROBING;
1284 siop_target->target_c.flags = 0;
1285 siop_target->target_c.id =
1286 sc->sc_c.clock_div << 24; /* scntl3 */
1287 siop_target->target_c.id |= target << 16; /* id */
1288 /* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1290 /* get a lun switch script */
1291 siop_target->lunsw = siop_get_lunsw(sc);
1292 if (siop_target->lunsw == NULL) {
1293 aprint_error_dev(sc->sc_c.sc_dev,
1294 "can't alloc lunsw for target %d\n",
1295 target);
1296 xs->error = XS_RESOURCE_SHORTAGE;
1297 scsipi_done(xs);
1298 splx(s);
1299 return;
1301 for (i=0; i < 8; i++)
1302 siop_target->siop_lun[i] = NULL;
1303 siop_add_reselsw(sc, target);
1305 if (siop_target->siop_lun[lun] == NULL) {
1306 siop_target->siop_lun[lun] =
1307 malloc(sizeof(struct siop_lun), M_DEVBUF,
1308 M_NOWAIT|M_ZERO);
1309 if (siop_target->siop_lun[lun] == NULL) {
1310 aprint_error_dev(sc->sc_c.sc_dev,
1311 "can't alloc siop_lun for "
1312 "target %d lun %d\n",
1313 target, lun);
1314 xs->error = XS_RESOURCE_SHORTAGE;
1315 scsipi_done(xs);
1316 splx(s);
1317 return;
1320 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1321 siop_cmd->cmd_c.xs = xs;
1322 siop_cmd->cmd_c.flags = 0;
1323 siop_cmd->cmd_c.status = CMDST_READY;
1325 /* load the DMA maps */
1326 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1327 siop_cmd->cmd_c.dmamap_cmd,
1328 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1329 if (error) {
1330 aprint_error_dev(sc->sc_c.sc_dev,
1331 "unable to load cmd DMA map: %d\n",
1332 error);
1333 xs->error = XS_DRIVER_STUFFUP;
1334 scsipi_done(xs);
1335 splx(s);
1336 return;
1338 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1339 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1340 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1341 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1342 ((xs->xs_control & XS_CTL_DATA_IN) ?
1343 BUS_DMA_READ : BUS_DMA_WRITE));
1344 if (error) {
1345 aprint_error_dev(sc->sc_c.sc_dev,
1346 "unable to load cmd DMA map: %d",
1347 error);
1348 xs->error = XS_DRIVER_STUFFUP;
1349 scsipi_done(xs);
1350 bus_dmamap_unload(sc->sc_c.sc_dmat,
1351 siop_cmd->cmd_c.dmamap_cmd);
1352 splx(s);
1353 return;
1355 bus_dmamap_sync(sc->sc_c.sc_dmat,
1356 siop_cmd->cmd_c.dmamap_data, 0,
1357 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1358 (xs->xs_control & XS_CTL_DATA_IN) ?
1359 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1361 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1362 siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1363 BUS_DMASYNC_PREWRITE);
1365 if (xs->xs_tag_type) {
1366 /* use tag_id + 1, tag 0 is reserved for untagged cmds*/
1367 siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1368 } else {
1369 siop_cmd->cmd_c.tag = 0;
1371 siop_setuptables(&siop_cmd->cmd_c);
1372 siop_cmd->saved_offset = SIOP_NOOFFSET;
1373 siop_table_sync(siop_cmd,
1374 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1375 siop_start(sc, siop_cmd);
1376 if (xs->xs_control & XS_CTL_POLL) {
1377 /* poll for command completion */
1378 while ((xs->xs_status & XS_STS_DONE) == 0) {
1379 delay(1000);
1380 siop_intr(sc);
1383 splx(s);
1384 return;
1386 case ADAPTER_REQ_GROW_RESOURCES:
1387 #ifdef SIOP_DEBUG
1388 printf("%s grow resources (%d)\n",
1389 device_xname(sc->sc_c.sc_dev),
1390 sc->sc_c.sc_adapt.adapt_openings);
1391 #endif
1392 siop_morecbd(sc);
1393 return;
1395 case ADAPTER_REQ_SET_XFER_MODE:
1397 struct scsipi_xfer_mode *xm = arg;
1398 if (sc->sc_c.targets[xm->xm_target] == NULL)
1399 return;
1400 s = splbio();
1401 if (xm->xm_mode & PERIPH_CAP_TQING)
1402 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1403 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1404 (sc->sc_c.features & SF_BUS_WIDE))
1405 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1406 if (xm->xm_mode & PERIPH_CAP_SYNC)
1407 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1408 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1409 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1410 sc->sc_c.targets[xm->xm_target]->status =
1411 TARST_ASYNC;
1413 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1414 if (scsipi_lookup_periph(chan,
1415 xm->xm_target, lun) != NULL) {
1416 /* allocate a lun sw entry for this device */
1417 siop_add_dev(sc, xm->xm_target, lun);
1421 splx(s);
1426 static void
1427 siop_start(struct siop_softc *sc, struct siop_cmd *siop_cmd)
1429 struct siop_lun *siop_lun;
1430 struct siop_xfer *siop_xfer;
1431 uint32_t dsa;
1432 int timeout;
1433 int target, lun, slot;
1436 * first make sure to read valid data
1438 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1441 * The queue management here is a bit tricky: the script always looks
1442 * at the slot from first to last, so if we always use the first
1443 * free slot commands can stay at the tail of the queue ~forever.
1444 * The algorithm used here is to restart from the head when we know
1445 * that the queue is empty, and only add commands after the last one.
1446 * When we're at the end of the queue wait for the script to clear it.
1447 * The best thing to do here would be to implement a circular queue,
1448 * but using only 53c720 features this can be "interesting".
1449 * A mid-way solution could be to implement 2 queues and swap orders.
1451 slot = sc->sc_currschedslot;
1453 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1454 * free. As this is the last used slot, all previous slots are free,
1455 * we can restart from 0.
1457 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1458 0x80000000) {
1459 slot = sc->sc_currschedslot = 0;
1460 } else {
1461 slot++;
1463 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1464 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1465 siop_lun =
1466 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1467 /* if non-tagged command active, panic: this shouldn't happen */
1468 if (siop_lun->siop_tag[0].active != NULL) {
1469 panic("siop_start: tagged cmd while untagged running");
1471 #ifdef DIAGNOSTIC
1472 /* sanity check the tag if needed */
1473 if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1474 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1475 panic("siop_start: tag not free");
1476 if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1477 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1478 printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1479 panic("siop_start: invalid tag id");
1482 #endif
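	/*
	 * A scheduler slot is two script words at
	 * Ent_script_sched_slot0 + slot * 8:
	 *   word 0: 0x80000000 (JUMP, IF FALSE, i.e. never taken) marks the
	 *           slot free; 0x80080000 arms the jump so the scheduler
	 *           takes it.
	 *   word 1: dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select,
	 *           the per-command select code.
	 * Below we fill word 1 first and only arm word 0 once the reselect
	 * switch and the timeout are set up.
	 */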
1484 * find a free scheduler slot and load it.
1486 for (; slot < SIOP_NSLOTS; slot++) {
		/*
		 * If cmd is 0x80000000 the slot is free
		 */
1490 if (siop_script_read(sc,
1491 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1492 0x80000000)
1493 break;
1495 if (slot == SIOP_NSLOTS) {
1497 * no more free slot, no need to continue. freeze the queue
1498 * and requeue this command.
1500 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1501 sc->sc_flags |= SCF_CHAN_NOSLOT;
1502 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1503 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1504 siop_scsicmd_end(siop_cmd);
1505 return;
1507 #ifdef SIOP_DEBUG_SCHED
1508 printf("using slot %d for DSA 0x%lx\n", slot,
1509 (u_long)siop_cmd->cmd_c.dsa);
1510 #endif
1511 /* mark command as active */
1512 if (siop_cmd->cmd_c.status == CMDST_READY)
1513 siop_cmd->cmd_c.status = CMDST_ACTIVE;
1514 else
1515 panic("siop_start: bad status");
1516 siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1517 /* patch scripts with DSA addr */
1518 dsa = siop_cmd->cmd_c.dsa;
1519 /* first reselect switch, if we have an entry */
1520 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1521 siop_script_write(sc,
1522 siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1523 dsa + sizeof(struct siop_common_xfer) +
1524 Ent_ldsa_reload_dsa);
1525 /* CMD script: MOVE MEMORY addr */
1526 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1527 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1528 siop_htoc32(&sc->sc_c, sc->sc_c.sc_scriptaddr +
1529 Ent_script_sched_slot0 + slot * 8);
1530 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1531 /* scheduler slot: JUMP ldsa_select */
1532 siop_script_write(sc,
1533 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1534 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1535 /* handle timeout */
1536 if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
		/* start expire timer */
1538 timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1539 if (timeout == 0)
1540 timeout = 1;
1541 callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1542 timeout, siop_timeout, siop_cmd);
1545 * Change JUMP cmd so that this slot will be handled
1547 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1548 0x80080000);
1549 sc->sc_currschedslot = slot;
1551 /* make sure SCRIPT processor will read valid data */
1552 siop_script_sync(sc,BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
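	/*
	 * Setting SIGP in ISTAT wakes the SCRIPTS scheduler out of its
	 * wait-for-reselection loop so that it can rescan the scheduler
	 * slots and pick up the JUMP armed above.
	 */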
1553 /* Signal script it has some work to do */
1554 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1555 SIOP_ISTAT, ISTAT_SIGP);
1556 /* and wait for IRQ */
1559 void
1560 siop_timeout(void *v)
1562 struct siop_cmd *siop_cmd = v;
1563 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1564 int s;
1566 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1567 printf("command timeout, CDB: ");
1568 scsipi_print_cdb(siop_cmd->cmd_c.xs->cmd);
1569 printf("\n");
1571 s = splbio();
1572 /* reset the scsi bus */
1573 siop_resetbus(&sc->sc_c);
1575 /* deactivate callout */
1576 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
	/*
	 * mark the command as timed out and just return; the bus reset
	 * will generate an interrupt which will be handled in siop_intr()
	 */
1583 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1584 splx(s);
1587 void
1588 siop_dump_script(struct siop_softc *sc)
1590 int i;
1592 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1593 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1594 siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[i]),
1595 siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[i + 1]));
1596 if ((siop_ctoh32(&sc->sc_c,
1597 sc->sc_c.sc_script[i]) & 0xe0000000) == 0xc0000000) {
1598 i++;
1599 printf(" 0x%08x", siop_ctoh32(&sc->sc_c,
1600 sc->sc_c.sc_script[i + 1]));
1602 printf("\n");
1606 void
1607 siop_morecbd(struct siop_softc *sc)
1609 int error, off, i, j, s;
1610 bus_dma_segment_t seg;
1611 int rseg;
1612 struct siop_cbd *newcbd;
1613 struct siop_xfer *xfer;
1614 bus_addr_t dsa;
1615 uint32_t *scr;
1617 /* allocate a new list head */
1618 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1619 if (newcbd == NULL) {
1620 aprint_error_dev(sc->sc_c.sc_dev,
1621 "can't allocate memory for command descriptors head\n");
1622 return;
1625 /* allocate cmd list */
1626 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1627 M_DEVBUF, M_NOWAIT|M_ZERO);
1628 if (newcbd->cmds == NULL) {
1629 aprint_error_dev(sc->sc_c.sc_dev,
1630 "can't allocate memory for command descriptors\n");
1631 goto bad3;
1633 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE,
1634 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1635 if (error) {
1636 aprint_error_dev(sc->sc_c.sc_dev,
1637 "unable to allocate cbd DMA memory, error = %d\n",
1638 error);
1639 goto bad2;
1641 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1642 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1643 if (error) {
1644 aprint_error_dev(sc->sc_c.sc_dev,
1645 "unable to map cbd DMA memory, error = %d\n",
1646 error);
1647 goto bad2;
1649 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1650 BUS_DMA_NOWAIT, &newcbd->xferdma);
1651 if (error) {
1652 aprint_error_dev(sc->sc_c.sc_dev,
1653 "unable to create cbd DMA map, error = %d\n",
1654 error);
1655 goto bad1;
1657 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1658 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1659 if (error) {
1660 aprint_error_dev(sc->sc_c.sc_dev,
1661 "unable to load cbd DMA map, error = %d\n",
1662 error);
1663 goto bad0;
1665 #ifdef DEBUG
1666 printf("%s: alloc newcdb at PHY addr 0x%lx\n",
1667 device_xname(sc->sc_c.sc_dev),
1668 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1669 #endif
1670 off = (sc->sc_c.features & SF_CHIP_BE) ? 3 : 0;
1671 for (i = 0; i < SIOP_NCMDPB; i++) {
1672 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1673 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1674 &newcbd->cmds[i].cmd_c.dmamap_data);
1675 if (error) {
1676 aprint_error_dev(sc->sc_c.sc_dev,
1677 "unable to create data DMA map for cbd: "
1678 "error %d\n", error);
1679 goto bad0;
1681 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1682 sizeof(struct scsipi_generic), 1,
1683 sizeof(struct scsipi_generic), 0,
1684 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1685 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1686 if (error) {
1687 aprint_error_dev(sc->sc_c.sc_dev,
1688 "unable to create cmd DMA map for cbd %d\n", error);
1689 goto bad0;
1691 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1692 newcbd->cmds[i].siop_cbdp = newcbd;
1693 xfer = &newcbd->xfers[i];
1694 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1695 memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1696 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1697 i * sizeof(struct siop_xfer);
1698 newcbd->cmds[i].cmd_c.dsa = dsa;
1699 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1700 xfer->siop_tables.t_msgout.count= siop_htoc32(&sc->sc_c, 1);
1701 xfer->siop_tables.t_msgout.addr = siop_htoc32(&sc->sc_c, dsa);
1702 xfer->siop_tables.t_msgin.count= siop_htoc32(&sc->sc_c, 1);
1703 xfer->siop_tables.t_msgin.addr = siop_htoc32(&sc->sc_c,
1704 dsa + offsetof(struct siop_common_xfer, msg_in));
1705 xfer->siop_tables.t_extmsgin.count= siop_htoc32(&sc->sc_c, 2);
1706 xfer->siop_tables.t_extmsgin.addr = siop_htoc32(&sc->sc_c,
1707 dsa + offsetof(struct siop_common_xfer, msg_in) + 1);
1708 xfer->siop_tables.t_extmsgdata.addr = siop_htoc32(&sc->sc_c,
1709 dsa + offsetof(struct siop_common_xfer, msg_in) + 3);
1710 xfer->siop_tables.t_status.count= siop_htoc32(&sc->sc_c, 1);
1711 xfer->siop_tables.t_status.addr = siop_htoc32(&sc->sc_c,
1712 dsa + offsetof(struct siop_common_xfer, status) + off);
1713 /* The select/reselect script */
1714 scr = &xfer->resel[0];
1715 for (j = 0; j < __arraycount(load_dsa); j++)
1716 scr[j] = siop_htoc32(&sc->sc_c, load_dsa[j]);
1718 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1719 * octet, reg offset is the third.
1721 scr[Ent_rdsa0 / 4] = siop_htoc32(&sc->sc_c,
1722 0x78100000 | ((dsa & 0x000000ff) << 8));
1723 scr[Ent_rdsa1 / 4] = siop_htoc32(&sc->sc_c,
1724 0x78110000 | ( dsa & 0x0000ff00 ));
1725 scr[Ent_rdsa2 / 4] = siop_htoc32(&sc->sc_c,
1726 0x78120000 | ((dsa & 0x00ff0000) >> 8));
1727 scr[Ent_rdsa3 / 4] = siop_htoc32(&sc->sc_c,
1728 0x78130000 | ((dsa & 0xff000000) >> 16));
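		/*
		 * e.g. with dsa == 0x12345678 the four words become
		 * 0x78107800, 0x78115600, 0x78123400 and 0x78131200,
		 * i.e. "MOVE 0x78 TO DSA0" ... "MOVE 0x12 TO DSA3", letting
		 * the reselect code reload the full 32-bit DSA one byte at
		 * a time.
		 */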
1729 scr[E_ldsa_abs_reselected_Used[0]] = siop_htoc32(&sc->sc_c,
1730 sc->sc_c.sc_scriptaddr + Ent_reselected);
1731 scr[E_ldsa_abs_reselect_Used[0]] = siop_htoc32(&sc->sc_c,
1732 sc->sc_c.sc_scriptaddr + Ent_reselect);
1733 scr[E_ldsa_abs_selected_Used[0]] = siop_htoc32(&sc->sc_c,
1734 sc->sc_c.sc_scriptaddr + Ent_selected);
1735 scr[E_ldsa_abs_data_Used[0]] = siop_htoc32(&sc->sc_c,
1736 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_data);
1737 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1738 scr[Ent_ldsa_data / 4] = siop_htoc32(&sc->sc_c, 0x80000000);
1739 s = splbio();
1740 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1741 splx(s);
1742 #ifdef SIOP_DEBUG
1743 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1744 siop_ctoh32(&sc->sc_c,
1745 newcbd->cmds[i].cmd_tables->t_msgin.addr),
1746 siop_ctoh32(&sc->sc_c,
1747 newcbd->cmds[i].cmd_tables->t_msgout.addr),
1748 siop_ctoh32(&sc->sc_c,
1749 newcbd->cmds[i].cmd_tables->t_status.addr));
1750 #endif
1752 s = splbio();
1753 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1754 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1755 splx(s);
1756 return;
1757 bad0:
1758 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1759 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1760 bad1:
1761 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1762 bad2:
1763 free(newcbd->cmds, M_DEVBUF);
1764 bad3:
1765 free(newcbd, M_DEVBUF);
1768 struct siop_lunsw *
1769 siop_get_lunsw(struct siop_softc *sc)
1771 struct siop_lunsw *lunsw;
1772 int i;
1774 if (sc->script_free_lo + __arraycount(lun_switch) >= sc->script_free_hi)
1775 return NULL;
1776 lunsw = TAILQ_FIRST(&sc->lunsw_list);
1777 if (lunsw != NULL) {
1778 #ifdef SIOP_DEBUG
1779 printf("siop_get_lunsw got lunsw at offset %d\n",
1780 lunsw->lunsw_off);
1781 #endif
1782 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1783 return lunsw;
1785 lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1786 if (lunsw == NULL)
1787 return NULL;
1788 #ifdef SIOP_DEBUG
1789 printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1790 #endif
1791 if (sc->sc_c.features & SF_CHIP_RAM) {
1792 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1793 sc->script_free_lo * 4, lun_switch,
1794 __arraycount(lun_switch));
1795 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1796 (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1797 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1798 } else {
1799 for (i = 0; i < __arraycount(lun_switch); i++)
1800 sc->sc_c.sc_script[sc->script_free_lo + i] =
1801 siop_htoc32(&sc->sc_c, lun_switch[i]);
1802 sc->sc_c.sc_script[
1803 sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1804 siop_htoc32(&sc->sc_c,
1805 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1807 lunsw->lunsw_off = sc->script_free_lo;
1808 lunsw->lunsw_size = __arraycount(lun_switch);
1809 sc->script_free_lo += lunsw->lunsw_size;
1810 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1811 return lunsw;
1814 void
1815 siop_add_reselsw(struct siop_softc *sc, int target)
1817 int i, j;
1818 struct siop_target *siop_target;
1819 struct siop_lun *siop_lun;
1821 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1823 * add an entry to resel switch
1825 siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
1826 for (i = 0; i < 15; i++) {
1827 siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
1828 if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
1829 == 0xff) { /* it's free */
1830 #ifdef SIOP_DEBUG
1831 printf("siop: target %d slot %d offset %d\n",
1832 target, i, siop_target->reseloff);
1833 #endif
1834 /* JUMP abs_foo, IF target | 0x80; */
1835 siop_script_write(sc, siop_target->reseloff,
1836 0x800c0080 | target);
1837 siop_script_write(sc, siop_target->reseloff + 1,
1838 sc->sc_c.sc_scriptaddr +
1839 siop_target->lunsw->lunsw_off * 4 +
1840 Ent_lun_switch_entry);
1841 break;
1844 if (i == 15) /* no free slot, shouldn't happen */
1845 panic("siop: resel switch full");
1847 sc->sc_ntargets++;
1848 for (i = 0; i < 8; i++) {
1849 siop_lun = siop_target->siop_lun[i];
1850 if (siop_lun == NULL)
1851 continue;
1852 if (siop_lun->reseloff > 0) {
1853 siop_lun->reseloff = 0;
1854 for (j = 0; j < SIOP_NTAG; j++)
1855 siop_lun->siop_tag[j].reseloff = 0;
1856 siop_add_dev(sc, target, i);
1859 siop_update_scntl3(sc, sc->sc_c.targets[target]);
1860 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
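
/*
 * target_c.id packs the per-target register values that the SCRIPTS code
 * and siop_update_scntl3() below reload: byte 3 is SCNTL3 (set from
 * clock_div when the target is allocated), byte 2 the SCSI id used for
 * selection, and byte 1 the SXFER value (sync offset/period, 0 until a
 * transfer mode has been negotiated).
 */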
1863 void
1864 siop_update_scntl3(struct siop_softc *sc,
1865 struct siop_common_target *_siop_target)
1867 struct siop_target *siop_target = (struct siop_target *)_siop_target;
1869 /* MOVE target->id >> 24 TO SCNTL3 */
1870 siop_script_write(sc,
1871 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1872 0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
1873 /* MOVE target->id >> 8 TO SXFER */
1874 siop_script_write(sc,
1875 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1876 0x78050000 | (siop_target->target_c.id & 0x0000ff00));
1877 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1880 void
1881 siop_add_dev(struct siop_softc *sc, int target, int lun)
1883 struct siop_lunsw *lunsw;
1884 struct siop_target *siop_target =
1885 (struct siop_target *)sc->sc_c.targets[target];
1886 struct siop_lun *siop_lun = siop_target->siop_lun[lun];
1887 int i, ntargets;
1889 if (siop_lun->reseloff > 0)
1890 return;
1891 lunsw = siop_target->lunsw;
1892 if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1894 * can't extend this slot. Probably not worth trying to deal
1895 * with this case
1897 #ifdef DEBUG
1898 aprint_error_dev(sc->sc_c.sc_dev,
1899 "%d:%d: can't allocate a lun sw slot\n", target, lun);
1900 #endif
1901 return;
1903 /* count how many free targets we still have to probe */
1904 ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
	/*
	 * we need 8 bytes for the lun sw additional entry, and
	 * possibly sizeof(tag_switch) for the tag switch entry.
	 * Keep enough free space for the free targets that could be
	 * probed later.
	 */
1912 if (sc->script_free_lo + 2 +
1913 (ntargets * __arraycount(lun_switch)) >=
1914 ((siop_target->target_c.flags & TARF_TAG) ?
1915 sc->script_free_hi - __arraycount(tag_switch) :
1916 sc->script_free_hi)) {
1918 * not enough space, probably not worth dealing with it.
1919 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
1921 #ifdef DEBUG
1922 aprint_error_dev(sc->sc_c.sc_dev,
1923 "%d:%d: not enough memory for a lun sw slot\n",
1924 target, lun);
1925 #endif
1926 return;
1928 #ifdef SIOP_DEBUG
1929 printf("%s:%d:%d: allocate lun sw entry\n",
1930 device_xname(sc->sc_c.sc_dev), target, lun);
1931 #endif
1932 /* INT int_resellun */
1933 siop_script_write(sc, sc->script_free_lo, 0x98080000);
1934 siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1935 /* Now the slot entry: JUMP abs_foo, IF lun */
1936 siop_script_write(sc, sc->script_free_lo - 2,
1937 0x800c0000 | lun);
1938 siop_script_write(sc, sc->script_free_lo - 1, 0);
1939 siop_lun->reseloff = sc->script_free_lo - 2;
1940 lunsw->lunsw_size += 2;
1941 sc->script_free_lo += 2;
1942 if (siop_target->target_c.flags & TARF_TAG) {
1943 /* we need a tag switch */
1944 sc->script_free_hi -= __arraycount(tag_switch);
1945 if (sc->sc_c.features & SF_CHIP_RAM) {
1946 bus_space_write_region_4(sc->sc_c.sc_ramt,
1947 sc->sc_c.sc_ramh,
1948 sc->script_free_hi * 4, tag_switch,
1949 __arraycount(tag_switch));
1950 } else {
1951 for(i = 0; i < __arraycount(tag_switch); i++) {
1952 sc->sc_c.sc_script[sc->script_free_hi + i] =
1953 siop_htoc32(&sc->sc_c, tag_switch[i]);
1956 siop_script_write(sc,
1957 siop_lun->reseloff + 1,
1958 sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1959 Ent_tag_switch_entry);
1961 for (i = 0; i < SIOP_NTAG; i++) {
1962 siop_lun->siop_tag[i].reseloff =
1963 sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1965 } else {
1966 /* non-tag case; just work with the lun switch */
1967 siop_lun->siop_tag[0].reseloff =
1968 siop_target->siop_lun[lun]->reseloff;
1970 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1973 void
1974 siop_del_dev(struct siop_softc *sc, int target, int lun)
1976 int i;
1977 struct siop_target *siop_target;
1979 #ifdef SIOP_DEBUG
1980 printf("%s:%d:%d: free lun sw entry\n",
1981 device_xname(sc->sc_c.sc_dev), target, lun);
1982 #endif
1983 if (sc->sc_c.targets[target] == NULL)
1984 return;
1985 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1986 free(siop_target->siop_lun[lun], M_DEVBUF);
1987 siop_target->siop_lun[lun] = NULL;
1988 /* XXX compact sw entry too ? */
1989 /* check if we can free the whole target */
1990 for (i = 0; i < 8; i++) {
1991 if (siop_target->siop_lun[i] != NULL)
1992 return;
1994 #ifdef SIOP_DEBUG
1995 printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
1996 device_xname(sc->sc_c.sc_dev), target, lun,
1997 siop_target->lunsw->lunsw_off);
1998 #endif
2000 * nothing here, free the target struct and resel
2001 * switch entry
2003 siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
2004 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
2005 TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
2006 free(sc->sc_c.targets[target], M_DEVBUF);
2007 sc->sc_c.targets[target] = NULL;
2008 sc->sc_ntargets--;
2011 #ifdef SIOP_STATS
2012 void
2013 siop_printstats(void)
2016 printf("siop_stat_intr %d\n", siop_stat_intr);
2017 printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
2018 printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
2019 printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
2020 printf("siop_stat_intr_saveoffset %d\n", siop_stat_intr_saveoffset);
2021 printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
2022 printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
2023 printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
2025 #endif